repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
saltstack/salt
|
salt/utils/vmware.py
|
get_dvss
|
python
|
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    datacenter_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        datacenter_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Walk Datacenter -> networkFolder -> childEntity to reach the DVSs.
    folder_children_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_children_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    retrieved = get_mors_with_properties(service_instance,
                                         vim.DistributedVirtualSwitch,
                                         container_ref=dc_ref,
                                         property_list=['name'],
                                         traversal_spec=traversal_spec)
    dvss = []
    for entry in retrieved:
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
|
Returns distributed virtual switches (DVSs) in a datacenter.
dc_ref
The parent datacenter reference.
dvs_names
The names of the DVSs to return. Default is None.
get_all_dvss
Return all DVSs in the datacenter. Default is False.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1053-L1090
|
[
"def get_mors_with_properties(service_instance, object_type, property_list=None,\n container_ref=None, traversal_spec=None,\n local_properties=False):\n '''\n Returns a list containing properties and managed object references for the managed object.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n\n object_type\n The type of content for which to obtain managed object references.\n\n property_list\n An optional list of object properties used to return even more filtered managed object reference results.\n\n container_ref\n An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,\n ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory\n rootFolder.\n\n traversal_spec\n An optional TraversalSpec to be used instead of the standard\n ``Traverse All`` spec\n\n local_properties\n Flag specigying whether the properties to be retrieved are local to the\n container. If that is the case, the traversal spec needs to be None.\n '''\n # Get all the content\n content_args = [service_instance, object_type]\n content_kwargs = {'property_list': property_list,\n 'container_ref': container_ref,\n 'traversal_spec': traversal_spec,\n 'local_properties': local_properties}\n try:\n content = get_content(*content_args, **content_kwargs)\n except BadStatusLine:\n content = get_content(*content_args, **content_kwargs)\n except IOError as exc:\n if exc.errno != errno.EPIPE:\n raise exc\n content = get_content(*content_args, **content_kwargs)\n\n object_list = []\n for obj in content:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n properties['object'] = obj.obj\n object_list.append(properties)\n log.trace('Retrieved %s objects', len(object_list))\n return object_list\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n",
"def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):\n '''\n Retrieves the service instance from a managed object.\n\n me_ref\n Reference to a managed object (of type vim.ManagedEntity).\n\n name\n Name of managed object. This field is optional.\n '''\n if not name:\n name = mo_ref.name\n log.trace('[%s] Retrieving service instance from managed object', name)\n si = vim.ServiceInstance('ServiceInstance')\n si._stub = mo_ref._stub\n return si\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    # Guard clause: report the missing dependency when pyVmomi is absent.
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary
    '''
    command = salt.utils.path.which('esxcli')
    if not command:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Fall back to the standard HTTPS endpoint when not specified.
    port = 443 if port is None else port
    protocol = 'https' if protocol is None else protocol

    if credstore:
        command += ' --credstore \'{0}\''.format(credstore)

    if esxi_host:
        # 'host' is a vCenter; -h proxies the command to the given ESXi host.
        command += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
            '--protocol={4} --portnumber={5} {6}'.format(host,
                                                         esxi_host,
                                                         user,
                                                         pwd,
                                                         protocol,
                                                         port,
                                                         cmd)
    else:
        # Connecting directly to an ESXi server; 'host' is the target itself.
        command += ' -s {0} -u {1} -p \'{2}\' ' \
            '--protocol={3} --portnumber={4} {5}'.format(host,
                                                         user,
                                                         pwd,
                                                         protocol,
                                                         port,
                                                         cmd)
    # output_loglevel='quiet' keeps the password-bearing command line out of
    # the normal logs.
    return salt.modules.cmdmod.run_all(command, output_loglevel='quiet')
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        The vCenter server or ESX/ESXi host to connect to.

    username / password
        Credentials; both are mandatory when ``mechanism`` is ``userpass``.

    protocol / port
        Endpoint parameters passed straight to ``SmartConnect``.

    mechanism
        Either ``userpass`` or ``sspi``.

    principal / domain
        Kerberos principal and user domain; both are mandatory when
        ``mechanism`` is ``sspi``.

    :raises salt.exceptions.CommandExecutionError: on missing mandatory
        parameters or an unsupported mechanism.
    :raises salt.exceptions.VMwareConnectionError: when the connection fails.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # BUGFIX: Python 3 exceptions have no `.message` attribute, so the
        # original `exc.message` raised AttributeError here and masked the
        # real error. Use the string form of the exception instead.
        if 'unexpected keyword argument' in six.text_type(exc):
            log.error('Initial connect to the VMware endpoint failed with %s',
                      six.text_type(exc))
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
        # Always propagate: swallowing the TypeError would leave
        # `service_instance` unbound and crash on the atexit registration.
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                    '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
                    '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # Certificate verification failed: retry once with an
                # unverified SSL context (private helper on py>=2.7.9;
                # _create_stdlib_context is the fallback name).
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: explicit TLSv1 context with verification off.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is closed when the interpreter shuts down.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of customizing a clone

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Look the spec up by name via the server's customization spec manager.
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Return the first object whose name matches, or None when absent.
    return next((entry for entry in container.view if entry.name == obj_name),
                None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # Try to reuse pyVim's process-wide cached service instance first.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
                (hasattr(stub, 'host') and
                 stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            # Cached instance targets the same host:port -- reuse it.
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Session expired: drop it and authenticate from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # Python 2.7.9+ enables strict handshake checks by default; relax both
    # hostname checking and client-side cert verification so the new stub
    # can talk to the same endpoint as the existing connection.
    ssl_context = None
    if sys.version_info[:3] > (2, 7, 8):
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE

    old_stub = service_instance._stub
    hostname = old_stub.host.split(':')[0]
    # The session cookie is the quoted portion of the Set-Cookie value.
    session_cookie = old_stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=ssl_context)
    # Carry the authenticated session over to the new stub.
    new_stub.cookie = old_stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='&lt;unnamed&gt;'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a fresh ServiceInstance that shares the managed object's SOAP
    # stub (and therefore its authenticated session).
    service_instance = vim.ServiceInstance('ServiceInstance')
    service_instance._stub = mo_ref._stub
    return service_instance
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    :raises salt.exceptions.VMwareApiError: on permission or other vim faults.
    :raises salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' identifies a vCenter Server, 'HostAgent' an ESXi host;
    # anything else is unexpected and raised as an API error.
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    :returns: the ``AboutInfo`` object exposed at ``content.about``.
    :raises salt.exceptions.VMwareApiError: on permission or other vim faults.
    :raises salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no such DVS exists
    '''
    # Avoid building a container view when the name is not known at all.
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    return next((dvs for dvs in container.view if dvs.name == dvs_name), None)
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    :returns: the base64-encoded token produced by the first GSSAPI step.
    :raises ImportError: when the ``gssapi`` library is not available.
    :raises salt.exceptions.CommandExecutionError: when no token could be
        obtained from the security context.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    # Kerberos service name of the form principal/host@DOMAIN.
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    # NOTE(review): in_token is never reassigned, so only the first step's
    # output is ever returned; a context needing more round trips falls into
    # one of the error branches below.
    in_token = None
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            # Python 2 accepts str directly; Python 3 needs explicit bytes.
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only ESXi hosts (apiType 'HostAgent') expose the hardware details
    # below; for any other endpoint an empty dict is returned.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the first (on ESXi, the only) HostSystem found.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # presumably memorySize is in bytes, converted here to MiB --
            # TODO confirm against the vSphere API docs
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Collect per-vmkernel-NIC addressing information.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn = host + '.' + domain; the dot is omitted when no domain
            # is configured.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # NOTE(review): this drops the local reference only; the container
        # view is not explicitly destroyed on the server.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    :raises salt.exceptions.VMwareApiError: on permission or other vim faults.
    :raises salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.

    :raises salt.exceptions.VMwareApiError: on permission or other vim faults.
    :raises salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    # Tracks whether a container view was created here (and must be destroyed
    # before returning).
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match against the managed objects.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Fetch every managed object of the requested type with just the one
    # property we need to compare.
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # Also allow matching on the stringified MOR with surrounding quote
        # characters stripped.
        candidate_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == candidate_id:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Retrieves managed object references of the given type together with the
    requested properties. Returns a list of dicts, one per object, each
    carrying the requested properties plus an ``object`` key holding the
    reference itself.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties to retrieve for each object.

    container_ref
        An optional managed object to search under (Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem). Defaults to the
        inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''

    def _retrieve_content():
        # Single retryable retrieval call.
        return get_content(service_instance, object_type,
                           property_list=property_list,
                           container_ref=container_ref,
                           traversal_spec=traversal_spec,
                           local_properties=local_properties)

    try:
        content = _retrieve_content()
    except BadStatusLine:
        # Transient connection failure: retry once.
        content = _retrieve_content()
    except IOError as err:
        if err.errno != errno.EPIPE:
            raise err
        # Broken pipe: retry once.
        content = _retrieve_content()

    results = []
    for retrieved in content:
        entry = {prop.name: prop.val for prop in retrieved.propSet}
        entry['object'] = retrieved.obj
        results.append(entry)
    log.trace('Retrieved %s objects', len(results))
    return results
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Retrieves specific properties of a managed object in a single
    property-collector call and returns them as a dict.

    mo_ref
        The managed object reference.

    properties
        List of property names of the managed object to retrieve.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # Fetch the object's name first so error messages can identify it.
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        # Not all managed object types expose a 'name' property.
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    prop_entries = get_mors_with_properties(service_instance,
                                            type(mo_ref),
                                            container_ref=mo_ref,
                                            property_list=properties,
                                            local_properties=True)
    if not prop_entries:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return prop_entries[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object, or None when the object has no
    name property.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Returns a new virtual network adapter device of the requested type.

    adapter_type
        The adapter type name; one of 'vmxnet', 'vmxnet2', 'vmxnet3',
        'e1000' or 'e1000e'.
    '''
    # Map of supported adapter type names to their device classes.
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type not in adapter_classes:
        raise ValueError('An unknown network adapter object type name.')
    return adapter_classes[adapter_type]()
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name for a given adapter device object.

    adapter_object
        The adapter device object from which to obtain the type name.
    '''
    # NOTE: check order is preserved from the original implementation;
    # more specific classes are tested before more general ones.
    type_table = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for device_class, label in type_table:
        if isinstance(adapter_object, device_class):
            return label
    raise ValueError('An unknown network adapter object type.')
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Traverse directly to the datacenter's 'networkFolder' property.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a default spec is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Fill in a default spec/config when the caller didn't supply them.
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
    dvs_create_spec.configSpec.name = dvs_name
    network_folder = get_network_folder(dc_ref)
    try:
        task = network_folder.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Reconfigures a distributed virtual switch with the given config spec and
    waits for the reconfiguration task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enables or disables network I/O control (NIOC) on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups) under a parent,
    optionally filtered by name.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # From a datacenter, traverse the network folder into its children.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # From a DVS, portgroups hang directly off its 'portgroup' property.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        # The uplink portgroup carries a system-defined tag.
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec).
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Reconfigures a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Log message typo fixed ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Destroys a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns standard-switch networks found under a datacenter, optionally
    filtered by name.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The names of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicating whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks live under the datacenter's network folder.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information
        (e.g. vim.Datacenter).

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Only the 'name' of each retrieved object is returned, regardless of
    # which extra properties were requested.
    return [item['name']
            for item in get_mors_with_properties(service_instance,
                                                 vim_object,
                                                 properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager of a service instance.

    service_instance
        The Service Instance Object from which to obtain the license
        manager.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager of a service instance.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license to the instance and returns the added license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label the vSphere client displays.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required (an
        ArgumentValueError is raised when missing).
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    # entity_type 'moid' = a concrete managed object; 'uuid' = the vCenter,
    # identified by its instance UUID.
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            # Remember to verify the assignment really belongs to the
            # vCenter named by the caller (checked further below).
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # A vCenter (uuid) query is expected to yield exactly one assignment.
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        # Guard against querying a different vCenter than the caller named.
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')

    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before translating the fault, consistent with every
            # other API wrapper in this module.
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns the names of the datacenters associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns datacenter objects in a vCenter, optionally filtered by name.

    service_instance
        The Service Instance Object from which to obtain datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns the vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder and returns its
    reference.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        datacenter = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return datacenter
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference.

    cluster
        The name of the cluster to be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's host folder.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    matches = []
    for entry in get_mors_with_properties(si,
                                          vim.ClusterComputeResource,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if entry['name'] == cluster:
            matches.append(entry['object'])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Reconfigures a cluster with the given spec and waits for the
    reconfiguration task to complete.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns the names of the clusters associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns the names of the datastore clusters (storage pods) associated
    with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns the names of the datastores associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dictionary mapping each datastore name to its basic
    information: name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {name: list_datastore_full(service_instance, name)
            for name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Sizes are converted from bytes to MiB.
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against division by zero: inaccessible/unmounted datastores
    # may report a capacity of 0.
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies as "vim.HostSystem:host-123"; keep the moid.
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name, or None
    when no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Destroy the server-side view so it doesn't leak, consistent with
        # the view cleanup done elsewhere in this module.
        container.Destroy()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id, or None when
    no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Destroy the server-side view so it doesn't leak, consistent with
        # the view cleanup done elsewhere in this module.
        container.Destroy()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    # One search task per datastore; results are accumulated across all of
    # them.
    for datobj in datastore_objects:
        try:
            # Datastore paths use the '[datastore] path' notation.
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Best effort: a datastore without the directory is skipped
            # rather than failing the whole search.
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.
        The caller's list is never modified by this function.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
    # Disk-based filtering relies on the host's storage system, so it is only
    # supported when the reference is a host
    if backing_disk_ids and not isinstance(reference, vim.HostSystem):
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\' when backing disk filter '
            'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Merge the disk-backed names into the name filter WITHOUT mutating
        # the caller's list (the previous extend() leaked the disk-backed
        # names back into the caller's datastore_names argument)
        datastore_names = list(datastore_names or []) + disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Pick a traversal spec matching the reference type; the default spec does
    # not reach the 'datastore' property for all container types
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used to retrieve the storage system.

    host_ref
        The vim.HostSystem object whose storage system is returned.

    hostname
        Name of the host; retrieved from ``host_ref`` if not provided.

    Raises VMwareObjectRetrievalError if the storage system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition informations for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The vim.HostStorageSystem used to query the disk.

    device_path
        Device path of the disk whose partition info is retrieved.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults.
    '''
    try:
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Only one device path was passed in, so only the first result is relevant
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem of the host owning the disk.

    device_path
        Device path of the disk on which the partition is computed.

    partition_info
        The existing vim.HostDiskPartitionInfo of the disk; must contain a
        free (type 'none') partition.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use '%s' lazy formatting: the previous '{0}' placeholder was never
    # substituted by the logging module's %-style formatting
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id; returns the vim.Datastore
    reference of the newly created datastore.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem; retrieved if not provided.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a new partition occupying the free space at the end of the disk
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises VMwareObjectRetrievalError if the datastore system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore via the datastore system of the first host attached
    to it.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises VMwareApiError if the datastore has no attached hosts or on
    permission/API faults, and VMwareRuntimeError on runtime faults.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host can perform the removal; use the first one
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    # A cluster filter only makes sense within a specific datacenter
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    properties = ['name']
    if not datacenter_name:
        # No datacenter given: search from the inventory root folder
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # The parent property is needed to check cluster membership
            properties.append('parent')
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    selected = []
    for host in hosts:
        if cluster_name:
            # Keep only hosts whose parent is the requested cluster
            parent = host['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host['name'] in host_names:
            selected.append(host['object'])
    return selected
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises VMwareObjectRetrievalError when the device/multipath/lun info
    cannot be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    Returns an empty list when the host reports no scsi luns.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if storage_system is None or not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # scsi address -> lun key
    lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        si, host_ref, storage_system, hostname)
    # lun key -> vim.ScsiLun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Join the two maps on the lun key
    return {scsi_addr: lun_by_key[lun_key]
            for scsi_addr, lun_key in six.iteritems(lun_key_by_scsi_addr)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Nothing to filter on: short-circuit with an empty result
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk (a vim.HostDiskPartitionInfo object).

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError when no devices are found or the disk
    is not present on the host.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the requested disk by its canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the requested disk by its canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # No cache disk filter: nothing to match
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (single) cache disk's canonical name
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    # Capacity disks are compared order-insensitively
    actual_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_ids = sorted(capacity_disk_ids)
    if actual_ids != expected_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_ids, expected_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # No manager given: traverse from the host to its cache config manager
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        # Manager given: query its cacheConfigurationInfo property directly
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host; returns True on success.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in mebibytes (MiB) of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the configuration task completes
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    # Thin wrapper over the generic lister (defined elsewhere in this module)
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean

    return
        Resourcepool managed object reference

    Raises VMwareObjectRetrievalError when no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Restrict the search to the datacenter when one is given, otherwise
    # search from the inventory root
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the requested names (the original formatted selected_pools,
        # which is always [] on this path, making the error useless)
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(resource_pool_names,
                                                            get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Return a list of the resource pools associated with the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    object_type = vim.ResourcePool
    return list_objects(service_instance, object_type)
def list_networks(service_instance):
    '''
    Return a list of the networks associated with the given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    object_type = vim.Network
    return list_objects(service_instance, object_type)
def list_vms(service_instance):
    '''
    Return a list of the virtual machines associated with the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    object_type = vim.VirtualMachine
    return list_objects(service_instance, object_type)
def list_folders(service_instance):
    '''
    Return a list of the folders associated with the given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    object_type = vim.Folder
    return list_objects(service_instance, object_type)
def list_dvs(service_instance):
    '''
    Return a list of the distributed virtual switches associated with the
    given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        switches.
    '''
    object_type = vim.DistributedVirtualSwitch
    return list_objects(service_instance, object_type)
def list_vapps(service_instance):
    '''
    Return a list of the vApps associated with the given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    object_type = vim.VirtualApp
    return list_objects(service_instance, object_type)
def list_portgroups(service_instance):
    '''
    Return a list of the distributed virtual portgroups associated with the
    given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        portgroups.
    '''
    object_type = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, object_type)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    Polls the task until it leaves the ``running``/``queued`` states, then
    either returns the task result (on success) or translates the task's
    stored fault into the matching ``salt.exceptions`` error.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Initial read of the task state; API faults raised while reading it are
    # mapped onto the corresponding salt exception types.
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only emit a progress message every `sleep_seconds` iterations.
        # NOTE(review): sleep_seconds=0 would raise ZeroDivisionError here.
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep the remainder of the current second so wakeups stay aligned
        # to whole-second boundaries relative to start_time.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        # Re-read the task state, translating faults exactly as above.
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state
        # Re-raise the fault stored on the task so the except clauses below
        # can map it onto the appropriate salt exception type.
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first detailed fault message, when present.
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    raises
        VMwareObjectRetrievalError if no VM matches; VMwareMultipleObjectsError
        if more than one VM has the given name.
    '''
    if datacenter and not parent_ref:
        # Scope the search to the named datacenter when no explicit container
        # reference was supplied.
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Bug fix: the adjacent string literals previously concatenated into
        # 'with thesame name' (missing space).
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the same name, '
            'please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    raises
        VMwareObjectRetrievalError if the folder cannot be determined
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # When cloning, reuse the parent folder of the base virtual machine.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Bug fix: previously this fell through to `return folder_object`
        # with the name unbound, raising UnboundLocalError instead of a
        # meaningful salt exception.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The destination folder could not be determined; no base vm, '
            'folder placement or datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if not placement:
        # Bug fix: the default `placement=None` previously raised TypeError
        # on the membership tests below; fall through to the explicit error.
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone-host lookup failed; traverse the host's parent
            # cluster for its resource pool instead.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Bug fix: this branch previously formatted placement['host'],
            # which is not present here and raised KeyError.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a dict with the
    size as an integer number of KB.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    # Multipliers from the given unit to KB; vCenter needs an integral value.
    kb_multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    unit_key = unit.lower()
    if unit_key not in kb_multipliers:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': int(size * kb_multipliers[unit_key]), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by its name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    # Map the requested action onto the vim method and a task label so the
    # fault handling below is written only once (it was previously duplicated
    # verbatim for the 'on' and 'off' branches).
    if action == 'on':
        power_method = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_method = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_method()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will ne placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Only pass a host when a valid vim.HostSystem was supplied.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object
    '''
    # Build the keyword arguments once; the host is optional.
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Applies the given config spec to an existing virtual machine and waits
    for the reconfiguration task to complete.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return wait_for_task(reconfig_task, vm_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(destroy_task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory.

    Unlike ``delete_vm``, this only removes the VM from the inventory; the
    VM's files are left on the datastore. (The previous docstring and trace
    message said "Destroying", a copy/paste error from ``delete_vm``.)

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, consistent with the other wrappers here.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_network_folder
|
python
|
def get_network_folder(dc_ref):
'''
Retrieves the network folder of a datacenter
'''
dc_name = get_managed_object_name(dc_ref)
log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
service_instance = get_service_instance_from_managed_object(dc_ref)
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
path='networkFolder',
skip=False,
type=vim.Datacenter)
entries = get_mors_with_properties(service_instance,
vim.Folder,
container_ref=dc_ref,
property_list=['name'],
traversal_spec=traversal_spec)
if not entries:
raise salt.exceptions.VMwareObjectRetrievalError(
'Network folder in datacenter \'{0}\' wasn\'t retrieved'
''.format(dc_name))
return entries[0]['object']
|
Retrieves the network folder of a datacenter
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1093-L1113
|
[
"def get_mors_with_properties(service_instance, object_type, property_list=None,\n container_ref=None, traversal_spec=None,\n local_properties=False):\n '''\n Returns a list containing properties and managed object references for the managed object.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n\n object_type\n The type of content for which to obtain managed object references.\n\n property_list\n An optional list of object properties used to return even more filtered managed object reference results.\n\n container_ref\n An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,\n ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory\n rootFolder.\n\n traversal_spec\n An optional TraversalSpec to be used instead of the standard\n ``Traverse All`` spec\n\n local_properties\n Flag specigying whether the properties to be retrieved are local to the\n container. If that is the case, the traversal spec needs to be None.\n '''\n # Get all the content\n content_args = [service_instance, object_type]\n content_kwargs = {'property_list': property_list,\n 'container_ref': container_ref,\n 'traversal_spec': traversal_spec,\n 'local_properties': local_properties}\n try:\n content = get_content(*content_args, **content_kwargs)\n except BadStatusLine:\n content = get_content(*content_args, **content_kwargs)\n except IOError as exc:\n if exc.errno != errno.EPIPE:\n raise exc\n content = get_content(*content_args, **content_kwargs)\n\n object_list = []\n for obj in content:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n properties['object'] = obj.obj\n object_list.append(properties)\n log.trace('Retrieved %s objects', len(object_list))\n return object_list\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n",
"def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):\n '''\n Retrieves the service instance from a managed object.\n\n me_ref\n Reference to a managed object (of type vim.ManagedEntity).\n\n name\n Name of managed object. This field is optional.\n '''\n if not name:\n name = mo_ref.name\n log.trace('[%s] Retrieving service instance from managed object', name)\n si = vim.ServiceInstance('ServiceInstance')\n si._stub = mo_ref._stub\n return si\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Fall back to the default port and protocol when not supplied.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    if esxi_host:
        # 'host' is a vCenter server; route the command to the named ESXi
        # machine with -h.
        target_args = ' -s {0} -h {1}'.format(host, esxi_host)
    else:
        # Connecting directly to an ESXi server.
        target_args = ' -s {0}'.format(host)
    esx_cmd += ('{0} -u {1} -p \'{2}\' '
                '--protocol={3} --portnumber={4} {5}').format(target_args,
                                                              user,
                                                              pwd,
                                                              protocol,
                                                              port,
                                                              cmd)

    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Supports the ``userpass`` and ``sspi`` (GSSAPI token) login mechanisms
    and falls back to unverified-SSL connections when certificate
    verification fails.
    '''
    log.trace('Retrieving new service instance')
    token = None
    # Validate the mechanism-specific mandatory parameters up front.
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    # First connection attempt with default SSL verification.
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Older pyVmomi versions do not accept b64token/mechanism kwargs.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        # Retry with an unverified SSL context when the failure was a
        # certificate verification error.
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            # Last resort: retry once more with an explicit TLSv1 context
            # that disables certificate verification.
            if 'certificate verify failed' in six.text_type(exc):
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is closed when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Return the spec directly instead of rebinding the name parameter.
    return si.content.customizationSpecManager.GetCustomizationSpec(
        name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the first matching managed object, or None if no object of that
    type has the given name.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Server-side view objects persist until the session ends; destroy
        # the view explicitly to avoid leaking it on the vCenter/ESXi host
        # (the original never destroyed it).
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    A cached connection (from pyVim's GetSi) is reused when possible; stale
    or mismatched sessions are dropped and reconnected.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Session expired server-side; reconnect from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.
    service_instance
        The Service Instance.
    path
        Path of the new stub.
    ns
        Namespace of the new stub.
        Default value is None
    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    # The session cookie is the quoted value inside the existing stub's cookie
    session_cookie = stub.cookie.split('"')[1]
    # Propagate the authenticated vCenter session to the new connection so
    # the new stub does not need to log in again
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    # Reuse the existing cookie so both stubs share one server-side session
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.
    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).
    name
        Name of managed object. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    si = vim.ServiceInstance('ServiceInstance')
    # Reuse the managed object's SOAP stub so the new ServiceInstance shares
    # the same authenticated session
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Disconnects from the vCenter server or ESXi host, closing the session
    held by the given service instance.
    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as err:
        # Most specific fault first: surface the missing privilege
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host.
    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' means vCenter; 'HostAgent' means a standalone ESXi host
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        "Unexpected api type '{0}' . Supported types: "
        "'VirtualCenter/HostAgent'".format(api_type))
def get_service_info(service_instance):
    '''
    Returns the AboutInfo of the vCenter or ESXi host.
    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        about_info = service_instance.content.about
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return about_info
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.
    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None if no DVS with that name exists
    '''
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        try:
            for item in container.view:
                if item.name == dvs_name:
                    return item
        finally:
            # Destroy the container view so we don't leak the server-side
            # view object (same cleanup get_content performs on its views)
            container.Destroy()
    return None
def _get_pnics(host_reference):
    '''
    Helper returning the host's physical NICs (PhysicalNic list) from its
    network config.
    '''
    physical_nics = host_reference.config.network.pnic
    return physical_nics
def _get_vnics(host_reference):
    '''
    Helper returning the host's virtual NICs (VirtualNic list) from its
    network config.
    '''
    virtual_nics = host_reference.config.network.vnic
    return virtual_nics
def _get_vnic_manager(host_reference):
    '''
    Helper returning the host's virtual NIC manager from its configManager.
    '''
    nic_manager = host_reference.configManager.virtualNicManager
    return nic_manager
def _get_dvs_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs
    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None if no portgroup matches the name
    '''
    matches = [pg for pg in dvs.portgroup if pg.name == portgroup_name]
    return matches[0] if matches else None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs
    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None if no portgroup matches the name
    '''
    # NOTE(review): this is the same name-based lookup as _get_dvs_portgroup;
    # presumably dvs.portgroup also lists uplink portgroups — confirm.
    return next((pg for pg in dvs.portgroup if pg.name == portgroup_name),
                None)
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection
    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    # Kerberos service name of the form principal/host@DOMAIN
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # step() advances the GSSAPI handshake; the first call (in_token=None)
        # produces the initial token to send to the server
        out_token = ctx.step(in_token)
        if out_token:
            # Return the token base64-encoded; on PY3 it must first be
            # converted to bytes
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): in_token is never reassigned in this loop, so reaching
        # here without an out_token always raises below — confirm intended
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type
    service_instance
        The service instance object to get hardware info for
    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Hardware grains are only collected when connected directly to an ESXi
    # host (HostAgent); a vCenter connection spans many hosts
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the HostSystem of this ESXi connection
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; convert to MiB
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Per-VirtualNic IP and MAC information
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn = host + '.' + domain, with the dot only when domain is set
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NIC MAC addresses
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the reference to the container view
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory of a Service Instance Object.
    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    # RetrieveContent returns the ServiceContent (the inventory root)
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.
    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    log.trace('Retrieving root folder')
    try:
        content = service_instance.RetrieveContent()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return content.rootFolder
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.
    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html
    service_instance
        The Service Instance from which to obtain content.
    obj_type
        The type of content to obtain.
    property_list
        An optional list of object properties to used to return even more filtered content results.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.
    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    # Tracks whether we created a container view here (and must destroy it)
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        # With no explicit property_list, retrieve all properties
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        # For local properties the starting object itself is collected and no
        # traversal is performed
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.
    service_instance
        The Service Instance from which to obtain managed object references.
    object_type
        The type of content for which to obtain managed object references.
    property_value
        The name of the property for which to obtain the managed object reference.
    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Fetch all objects of the requested type with just the one property we
    # need, then return the first match on property value or on the MOR id
    for entry in get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref):
        mor_id = six.text_type(entry.get('object', '')).strip('\'"')
        if entry[property_name] == property_value or property_value == mor_id:
            return entry['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.
    service_instance
        The Service Instance from which to obtain managed object references.
    object_type
        The type of content for which to obtain managed object references.
    property_list
        An optional list of object properties used to return even more filtered managed object reference results.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec
    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    def _retrieve():
        # Single place for the content retrieval call so retries stay in sync
        return get_content(service_instance, object_type,
                           property_list=property_list,
                           container_ref=container_ref,
                           traversal_spec=traversal_spec,
                           local_properties=local_properties)

    try:
        content = _retrieve()
    except BadStatusLine:
        # Stale cached HTTP connection; a single retry re-establishes it
        content = _retrieve()
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        # Broken pipe: retry once as well
        content = _retrieve()

    object_list = []
    for obj in content:
        properties = {prop.name: prop.val for prop in obj.propSet}
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.
    mo_ref
        The managed object reference.
    properties
        List of properties of the managed object to retrieve.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First fetch just 'name' so error messages can identify the object;
    # fall back to a placeholder if 'name' isn't a valid property here
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    # Retrieve the requested properties locally (no traversal) from the object
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.
    If the name wasn't found, it returns None.
    mo_ref
        The managed object reference.
    '''
    # Delegate to the property helper and pull out just the 'name' key
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new network adapter device for the given adapter type name.
    adapter_type
        The adapter type from which to obtain the network adapter type.
    '''
    # Map each supported adapter-type name to its pyVmomi device class
    device_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in device_classes:
        return device_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name for an adapter device object.
    adapter_object
        The adapter object from which to obtain the network adapter type.
    '''
    # Order is significant: more specific device classes are tested before
    # more generic ones, mirroring the original isinstance check order
    checks = [
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    ]
    for device_cls, type_name in checks:
        if isinstance(adapter_object, device_cls):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.
    dc_ref
        The parent datacenter reference.
    dvs_names
        The names of the DVSs to return. Default is None.
    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs
    folder_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switches (DVS) in a datacenter.
    Returns the reference to the newly created distributed virtual switch.
    dc_ref
        The parent datacenter reference.
    dvs_name
        The name of the DVS to create.
    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Build a minimal create spec when the caller didn't supply one
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
    dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Block until vCenter reports the creation task as finished
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.
    dvs_ref
        The DVS reference.
    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Block until vCenter reports the reconfiguration task as finished
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC is enabled on a DVS.
    dvs_ref
        The DVS reference.
    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.
    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.
    portgroup_names
        The names of the dvss to return. Default is None.
    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    if isinstance(parent_ref, vim.Datacenter):
        # Starting from a datacenter: hop into its networkFolder, then into
        # the folder's children to reach the portgroups
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        # Starting from a DVS: its 'portgroup' property lists them directly
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs)
    dvs_ref
        The dvs reference
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        tags = entry['tag']
        # Uplink portgroups are identified by the SYSTEM/DVS.UPLINKPG tag
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs)
    dvs_ref
        The dvs reference
    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Block until vCenter reports the creation task as finished
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup
    portgroup_ref
        The portgroup reference
    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in trace message ('portgrouo' -> 'portgroup')
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfiguration task as finished
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup
    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Block until vCenter reports the destroy task as finished
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.
    parent_ref
        The parent object reference. A datacenter object.
    network_names
        The name of the standard switch networks. Default is None.
    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to reach the
    # network objects
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of objects from a given service instance.
    service_instance
        The Service Instance for which to obtain a list of objects.
    vim_object
        The type of content for which to obtain information.
    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    props = ['name'] if properties is None else properties
    # Return only the 'name' of each retrieved object
    return [entry['name']
            for entry in get_mors_with_properties(service_instance,
                                                  vim_object,
                                                  props)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.
    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.
    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # The attribute can legitimately be unset on some endpoints
    if not assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    label = vim.KeyValue()
    # 'VpxClientLicenseLabel' is the label key under which the description is
    # stored; presumably the one shown by the vSphere client — TODO confirm
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    # entity_type: 'moid' - entity referenced by its managed object id;
    # 'uuid' - the vCenter itself, referenced by its instance UUID
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        # NOTE(review): assignments[0] raises IndexError if the query returns
        # no assignments; presumably a vCenter always has at least one
        # (possibly evaluation) license assigned — TODO confirm
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')

    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # No entity specified: assign the license to the vCenter itself,
        # identified by its instance UUID.
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Consistency fix: log the fault before re-raising, as every
            # other API wrapper in this module does.
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host, identified by its managed object id
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns the names of the datacenters visible to a service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    datacenter_type = vim.Datacenter
    return list_objects(service_instance, datacenter_type)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter, optionally filtered by name.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    found = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        # Keep the datacenter if everything was requested, or if its name
        # is among the requested ones.
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            found.append(entry['object'])
    return found
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object matching the given name.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name

    Raises VMwareObjectRetrievalError when no such datacenter exists.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if matches:
        return matches[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder and returns the
    new vim.Datacenter object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster (vim.ClusterComputeResource) in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved

    Raises VMwareObjectRetrievalError when the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Walk Datacenter -> hostFolder -> childEntity to reach the clusters;
    # NOTE(review): this does not appear to recurse into nested sub-folders
    # of the host folder — confirm if folder hierarchies must be supported.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter. The created cluster object is not
    returned.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter. Blocks until the reconfiguration
    task completes.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True applies the spec incrementally instead of replacing
        # the whole cluster configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns the names of the clusters visible to a service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    cluster_type = vim.ClusterComputeResource
    return list_objects(service_instance, cluster_type)
def list_datastore_clusters(service_instance):
    '''
    Returns the names of the datastore clusters (storage pods) visible to a
    service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    pod_type = vim.StoragePod
    return list_objects(service_instance, pod_type)
def list_datastores(service_instance):
    '''
    Returns the names of the datastores visible to a service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastore_type = vim.Datastore
    return list_objects(service_instance, datastore_type)
def list_datastores_full(service_instance):
    '''
    Returns a mapping of datastore name to its basic information:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    Sizes are reported in MiB; ``usage`` is a percentage.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError when the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )

    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # summary sizes are in bytes; convert to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Robustness fix: an inaccessible datastore can report zero capacity,
    # which previously raised ZeroDivisionError here.
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0
    items['hosts'] = []

    for host in datastore_object.host:
        # host.key stringifies like 'vim.HostSystem:host-123'; keep the moid
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)

    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name, or None if
    no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # isn't leaked on the vCenter/ESXi side.
        container.Destroy()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id, or None if
    no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # isn't leaked on the vCenter/ESXi side.
        container.Destroy()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Datastore paths use the '[datastore_name] relative/path' form
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Best-effort search: datastores that don't contain the
            # directory are simply skipped.
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.
        The passed-in list is never modified.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # BUGFIX: build a new list instead of extending the caller's
            # list in place; 'datastore_names.extend(...)' leaked the
            # disk-backed names back into the caller's argument.
            datastore_names = list(datastore_names) + disk_datastores
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)

    # Pick a traversal spec matching the reference type
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host, used for logging/errors. Retrieved if not given.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Reach the storage system through the host's configManager
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    # NOTE(review): 'systemFile' is presumably just a cheap property to
    # request; only the managed object itself is used below.
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition informations for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The host's vim.HostStorageSystem.

    device_path
        Device path to query.
    '''
    try:
        # The API takes a list of device paths; we query exactly one
        partition_infos = \
                storage_system.RetrieveDiskPartitionInfo(
                    devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The host's vim.HostStorageSystem.

    device_path
        Device path of the disk being partitioned.

    partition_info
        Existing vim.HostDiskPartitionInfo of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    # NOTE: 'layout' shares the partition block list, so this mutation is
    # what the ComputeDiskPartitionInfo call below actually sees.
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # BUGFIX: log.trace uses lazy %-style formatting; the previous '{0}'
    # placeholder was never interpolated, so the object was dropped from
    # the logged message.
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id and returns the new vim.Datastore
    reference.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved if not provided.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    # Compute a partition spec that uses the remaining free space of the disk
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Reach the datastore system through the host's configManager
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The removal is performed through the datastore
    system of the first host the datastore is attached to.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host can perform the removal; use the first one
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    # Cluster filtering only makes sense within a datacenter
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    properties = ['name']
    if cluster_name:
        # Cluster membership is determined from each host's parent object,
        # so that property has to be fetched as well
        properties.append('parent')
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
    else:
        # No datacenter given: search from the inventory root folder
        start_point = get_root_folder(service_instance)

    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])

    selected = []
    for host in hosts:
        if cluster_name:
            parent = host['parent']
            # Only hosts directly parented by the requested cluster qualify
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host['name'] in host_names:
            selected.append(host['object'])
    return selected
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Retrieves every scsi lun (vim.ScsiLun) known to an ESXi host.

    host_ref
        The vim.HostSystem object whose luns are retrieved.

    storage_system
        The host's storage system. Retrieved from the host if not provided.

    hostname
        Name of the host. Looked up from ``host_ref`` if not provided.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        service_instance = get_service_instance_from_managed_object(
            host_ref, name=hostname)
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    luns = device_info.scsiLun
    if not luns:
        log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
        return []
    log.trace('Retrieved scsi luns in host \'%s\': %s',
              hostname, [lun.canonicalName for lun in luns])
    return luns
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Builds a map from scsi address to the corresponding vim.ScsiLun object
    for all luns on an ESXi host.

    host_ref
        The vim.HostSystem object whose luns are mapped.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref,
                                                                name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # scsi address -> lun key
    key_by_addr = _get_scsi_address_to_lun_key_map(service_instance, host_ref,
                                                   storage_system, hostname)
    # lun key -> lun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Compose the two maps: scsi address -> lun object
    return {addr: lun_by_key[key]
            for addr, key in six.iteritems(key_by_addr)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # No filter criteria and not fetching everything: nothing can match
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    scsi_luns = get_all_luns(host_ref, storage_system)
    # A disk matches if we're fetching everything, or it matches either
    # filter (canonical name, or key resolved from a scsi address)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the requested disk among the host's scsi luns by canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # The storage system API is addressed by device path, not canonical name
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    # Retrieve the host's device list by traversing from the host object to
    # its storage system
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the disk by its canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # No cache disk filter and not fetching everything: nothing matches
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # Each disk group is identified by its (single) ssd cache disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that a disk group's cache and capacity disks match the expected
    ids; raises ArgumentValueError on any mismatch, returns True otherwise.
    '''
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    # Order is irrelevant: compare the sorted capacity disk id sets
    actual_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_ids = sorted(capacity_disk_ids)
    if actual_ids != expected_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_ids, expected_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if host_cache_manager:
        # Caller already has the cache manager: query it directly
        props = get_properties_of_managed_object(host_cache_manager,
                                                 ['cacheConfigurationInfo'])
        if not props:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return props['cacheConfigurationInfo'][0]
    # Otherwise traverse from the host to its cache configuration manager
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.cacheConfigurationManager',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostCacheConfigurationManager,
                                       ['cacheConfigurationInfo'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results or not results[0].get('cacheConfigurationInfo'):
        log.trace('Host \'%s\' has no host cache', hostname)
        return None
    return results[0]['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in mebibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']

    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        # Asynchronous vSphere API call; completion is awaited below
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Lists all vim.HostSystem objects known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        List of resource pool names to retrieve

    datacenter_name
        Name of the datacenter where the resource pool is available.
        If None, the whole inventory is searched from the root folder.

    get_all_resource_pools
        Boolean; when True all resource pools are returned regardless of
        ``resource_pool_names``

    return
        List of vim.ResourcePool managed object references

    raises
        VMwareObjectRetrievalError if no resource pool matched
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Scope the search to the datacenter when one is given, otherwise search
    # the whole inventory starting at the root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the names that were requested; the previous code formatted
        # the (always empty) result list, making the error useless
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Lists every vim.ResourcePool reachable from the given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Lists every vim.Network reachable from the given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Lists every vim.VirtualMachine reachable from the given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Lists every vim.Folder reachable from the given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Lists every vim.DistributedVirtualSwitch reachable from the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Lists every vim.VirtualApp reachable from the given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Lists every vim.dvs.DistributedVirtualPortgroup reachable from the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll until the task leaves the running/queued states. NOTE: the poll
    # interval is always ~1 second; 'sleep_seconds' only throttles how often
    # the progress message below is logged.
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep to the next whole-second boundary relative to start_time so
        # time_counter approximates elapsed seconds
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the fault stored on the task
        # so the except clauses below can translate it into a salt exception
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                # Append the first localized fault message for extra context
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Looks up a virtual machine by name and returns it together with the
    requested properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.
    '''
    if datacenter and not parent_ref:
        # Restrict the search to the named datacenter
        parent_ref = salt.utils.vmware.get_datacenter(service_instance,
                                                      datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    all_vms = salt.utils.vmware.get_mors_with_properties(
        service_instance,
        vim.VirtualMachine,
        vm_properties,
        container_ref=parent_ref,
        traversal_spec=traversal_spec)
    matches = [entry for entry in all_vms if entry['name'] == name]
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    if len(matches) > 1:
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the'
            'same name, please specify a container.']))
    return matches[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Cloning: place the new VM in the same folder as the base VM
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    # NOTE(review): if none of the branches above matched (no base VM, no
    # 'folder' key in placement and a falsy datacenter) 'folder_object' is
    # unbound and this return raises UnboundLocalError -- confirm callers
    # always supply one of them
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if placement is None:
        # The default value used to reach "'host' in None" below, raising a
        # TypeError; treat a missing placement like an empty one so the
        # explicit 'Placement is not defined.' error is raised instead
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            # The host may expose its resource pool directly
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The property is not available on this host; presumably the host
            # is inside a cluster -- walk up to the cluster's resource pool
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # The previous message formatted placement['host'], which does not
            # exist in this branch and turned the error into a KeyError
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a long integer.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    # Binary multipliers; vCenter expects an integer KiB value
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    try:
        multiplier = multipliers[unit.lower()]
    except KeyError:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': int(size * multiplier), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers a virtual machine on or off.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    # Select the operation up front so the fault handling is written once
    if action == 'on':
        power_method = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_method = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_method()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will ne placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Only pass 'host' when a valid host object was supplied
    task_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        task_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **task_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object. Optional.
    '''
    try:
        # 'host' may only be passed when a placement host was supplied
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # A missing .vmx surfaces as FileNotFound from the task; re-raise it
        # as a registration-specific error
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Applies a configuration spec to an existing virtual machine and waits
    for the reconfiguration to complete.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine and waits for the destroy task to complete.

    vm_ref
        Managed object reference of a virtual machine object

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic VIM faults.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory.

    Unlike ``delete_vm``, this removes the VM from the inventory without
    deleting its files from disk. (The previous docstring and log message
    said "Destroy", a copy-paste from ``delete_vm``.)

    vm_ref
        Managed object reference of a virtual machine object

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic VIM faults.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, consistent with every other API wrapper in
        # this module (the original silently dropped the log.exception call).
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
create_dvs
|
python
|
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete.

    Note: despite older documentation, this function does not return the new
    DVS reference; it returns None once the task has finished.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec is built here.

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic VIM faults.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Build a default spec when the caller did not supply one; the DVS name
    # is only injected when a config spec had to be created here.
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
|
Creates a distributed virtual switches (DVS) in a datacenter.
Returns the reference to the newly created distributed virtual switch.
dc_ref
The parent datacenter reference.
dvs_name
The name of the DVS to create.
dvs_create_spec
The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
Default is None.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1116-L1152
|
[
"def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):\n '''\n Waits for a task to be completed.\n\n task\n The task to wait for.\n\n instance_name\n The name of the ESXi host, vCenter Server, or Virtual Machine that\n the task is being run on.\n\n task_type\n The type of task being performed. Useful information for debugging purposes.\n\n sleep_seconds\n The number of seconds to wait before querying the task again.\n Defaults to ``1`` second.\n\n log_level\n The level at which to log task information. Default is ``debug``,\n but ``info`` is also supported.\n '''\n time_counter = 0\n start_time = time.time()\n log.trace('task = %s, task_type = %s', task, task.__class__.__name__)\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n while task_info.state == 'running' or task_info.state == 'queued':\n if time_counter % sleep_seconds == 0:\n msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n time.sleep(1.0 - ((time.time() - start_time) % 1.0))\n time_counter += 1\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. 
Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n if task_info.state == 'success':\n msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n # task is in a successful state\n return task_info.result\n else:\n # task is in an error state\n try:\n raise task_info.error\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.fault.SystemError as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareSystemError(exc.msg)\n except vmodl.fault.InvalidArgument as exc:\n log.exception(exc)\n exc_message = exc.msg\n if exc.faultMessage:\n exc_message = '{0} ({1})'.format(exc_message,\n exc.faultMessage[0].message)\n raise salt.exceptions.VMwareApiError(exc_message)\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n",
"def get_network_folder(dc_ref):\n '''\n Retrieves the network folder of a datacenter\n '''\n dc_name = get_managed_object_name(dc_ref)\n log.trace('Retrieving network folder in datacenter \\'%s\\'', dc_name)\n service_instance = get_service_instance_from_managed_object(dc_ref)\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n path='networkFolder',\n skip=False,\n type=vim.Datacenter)\n entries = get_mors_with_properties(service_instance,\n vim.Folder,\n container_ref=dc_ref,\n property_list=['name'],\n traversal_spec=traversal_spec)\n if not entries:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Network folder in datacenter \\'{0}\\' wasn\\'t retrieved'\n ''.format(dc_name))\n return entries[0]['object']\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Load this utility module only when pyVmomi is importable.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port (defaults to 443)
    :param protocol: Connection protocol (defaults to https)
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: Dictionary as produced by ``cmd.run_all`` (retcode, stdout,
             stderr, ...), or False when the esxcli binary is not installed

    NOTE(review): the password is interpolated into the shell command line,
    so it is visible in the process list while the command runs and must not
    contain single quotes. The command itself is run with
    ``output_loglevel='quiet'`` so it is not written to the logs.
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting through a vCenter; -h selects the managed ESXi host on
        # which the command actually executes.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Two login mechanisms are supported:

    - ``userpass``: requires ``username`` and ``password``.
    - ``sspi``: requires ``principal`` and ``domain``; a GSSAPI token is
      obtained and sent instead of a password.

    On SSL certificate verification failures the connection is retried with
    progressively relaxed SSL settings (unverified context, then TLSv1 with
    CERT_NONE).

    Raises:
        salt.exceptions.CommandExecutionError: for missing or unsupported
            mechanism parameters.
        salt.exceptions.VMwareConnectionError: when the connection cannot be
            established.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Older pyVmomi versions do not accept the b64token/mechanism kwargs.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First fallback: retry with an unverified SSL context when the
            # failure is a certificate verification error (self-signed certs
            # are common on ESXi hosts).
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: force TLSv1 with certificate checking disabled.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is closed when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Return a reference to the VMware customization spec with the given name,
    for the purposes of customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see
        get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Return the managed object of the given type whose name matches
    ``obj_name``, or None when no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see
        get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    matches = (item for item in container.view if item.name == obj_name)
    return next(matches, None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    A session cached by pyVim is reused when possible; it is invalidated and
    re-established when running inside a proxy minion, when it points at a
    different host/port, or when the session is no longer authenticated.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic VIM faults.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # GetSi() returns the session pyVim has cached for this process, if any.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: reconnect from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Build a new SOAP stub that reuses an existing connection's session
    cookie but points at a different service path.

    service_instance
        The Service Instance whose connection is reused.

    path
        Path of the new stub.

    ns
        Namespace of the new stub. Default value is None.

    version
        Version of the new stub. Default value is None.
    '''
    # Python 2.7.9 and later enable strict SSL handshaking by default, so
    # hostname checking and client-side certificate verification are turned
    # off to let the new stub connect the way the original one did.
    ssl_context = None
    if sys.version_info[:3] > (2, 7, 8):
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE

    old_stub = service_instance._stub
    hostname = old_stub.host.split(':')[0]
    session_cookie = old_stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie

    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=ssl_context)
    new_stub.cookie = old_stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Build a service instance that shares the connection stub of an existing
    managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of the managed object, used only for logging. This field is
        optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    service_instance = vim.ServiceInstance('ServiceInstance')
    service_instance._stub = mo_ref._stub
    return service_instance
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic VIM faults.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server
    (apiType ``VirtualCenter``) and False if the connection is made to an
    ESXi host (apiType ``HostAgent``).

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises:
        salt.exceptions.VMwareApiError: on permission/VIM faults or when the
            reported api type is neither of the two supported values.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host (the ``about`` object of
    the service content).

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic VIM faults.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no DVS with that name exists
    '''
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(
            inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        try:
            for item in container.view:
                if item.name == dvs_name:
                    return item
        finally:
            # Container views are server-side objects; destroy the view so
            # it does not accumulate on the vCenter/ESXi side. This mirrors
            # the cleanup done in get_content() (the original leaked it).
            container.Destroy()
    return None
def _get_pnics(host_reference):
    '''
    Helper function that returns a list of PhysicalNics and their information.
    '''
    network_info = host_reference.config.network
    return network_info.pnic
def _get_vnics(host_reference):
    '''
    Helper function that returns a list of VirtualNics and their information.
    '''
    network_info = host_reference.config.network
    return network_info.vnic
def _get_vnic_manager(host_reference):
    '''
    Helper function that returns a list of Virtual NicManagers
    and their information.
    '''
    config_manager = host_reference.configManager
    return config_manager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
    '''
    Return the portgroup with the given name on the dvs, or None when the
    dvs has no portgroup by that name.

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object
    '''
    matching = (pg for pg in dvs.portgroup if pg.name == portgroup_name)
    return next(matching, None)
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return the portgroup with the given name on the dvs, or None when the
    dvs has no portgroup by that name.

    NOTE(review): this matches any portgroup by name, exactly like
    _get_dvs_portgroup; it does not restrict the search to uplink
    portgroups despite its name.

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object
    '''
    for candidate in dvs.portgroup:
        if candidate.name == portgroup_name:
            return candidate
    return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    Returns the base64-encoded token produced by the first GSSAPI context
    step.

    Raises:
        ImportError: when the gssapi library is not available.
        salt.exceptions.CommandExecutionError: when no token was produced.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # NOTE(review): in_token is never updated with a server response, so
        # this loop effectively performs a single step: it either returns
        # the first out_token or raises below — confirm against callers.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance
    is a HostAgent type; a vCenter connection yields an empty dict.

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # All data is read from the first host in the view (a direct
            # HostAgent connection exposes a single host).
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # Divided by 1024**2 — presumably memorySize is in bytes and the
            # grain is in MB; confirm against the vSphere API docs.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Per-virtual-NIC addresses and MACs.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # FQDN is host + '.' + domain, with the dot omitted when no
            # domain is configured.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NIC MAC addresses.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the local reference to the container view.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic VIM faults.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic VIM faults.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view (only when it was created locally above)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose ``property_name`` equals
    ``property_value``, or whose stringified moid equals ``property_value``.
    Returns None when nothing matches.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match against.

    property_name
        An object property used to select the object reference. Defaults to
        ``name``.

    container_ref
        An optional reference to the managed object to search under. If not
        specified the inventory rootFolder is searched.
    '''
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # Managed object references stringify as 'vim.Type:"moid"'; strip the
        # quotes so callers may also match on the raw moid string.
        moid = six.text_type(candidate.get('object', '')).strip('\'"')
        if property_value in (candidate[property_name], moid):
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Return a list of dicts, one per retrieved managed object, mapping each
    requested property name to its value, plus an ``object`` key holding the
    managed object reference itself.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties to retrieve.

    container_ref
        An optional reference to the managed object to search under. If not
        specified the inventory rootFolder is searched.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''
    args = [service_instance, object_type]
    kwargs = {'property_list': property_list,
              'container_ref': container_ref,
              'traversal_spec': traversal_spec,
              'local_properties': local_properties}
    try:
        content = get_content(*args, **kwargs)
    except BadStatusLine:
        # vCenter occasionally drops the connection; a single retry
        # usually succeeds.
        content = get_content(*args, **kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*args, **kwargs)

    results = []
    for obj in content:
        entry = {prop.name: prop.val for prop in obj.propSet}
        entry['object'] = obj.obj
        results.append(entry)
    log.trace('Retrieved %s objects', len(results))
    return results
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Return the requested properties of a managed object, retrieved in a
    single optimized call.

    mo_ref
        The managed object reference.

    properties
        List of property names of the managed object to retrieve.

    Raises ``VMwareApiError`` when the properties could not be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    mo_type_name = type(mo_ref).__name__
    log.trace('Retrieving name of %s', mo_type_name)
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        # Some managed objects don't expose a 'name' property; carry on
        # with a placeholder used only for logging/error messages.
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, mo_type_name, mo_name)
    prop_entries = get_mors_with_properties(service_instance,
                                            type(mo_ref),
                                            container_ref=mo_ref,
                                            property_list=properties,
                                            local_properties=True)
    if not prop_entries:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return prop_entries[0]
def get_managed_object_name(mo_ref):
    '''
    Return the name of a managed object, or None when the object has no
    ``name`` property.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device of the requested type.

    adapter_type
        The adapter type name; one of 'vmxnet', 'vmxnet2', 'vmxnet3',
        'e1000' or 'e1000e'.

    Raises ``ValueError`` for any other type name.
    '''
    # Dispatch table from type name to the pyVmomi device class.
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Return the type name string for a virtual network adapter device.

    adapter_object
        The adapter device instance to classify.

    Raises ``ValueError`` when the device class is not recognized.
    '''
    # Order matters: the more specific subclasses (Vmxnet2/3, E1000e) must
    # be tested before their base classes (Vmxnet, E1000).
    ordered_checks = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for adapter_class, type_name in ordered_checks:
        if isinstance(adapter_object, adapter_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Return the distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs.
    folder_traversal = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_traversal])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Return the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises ``VMwareObjectRetrievalError`` when the folder can't be found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # The network folder hangs directly off the datacenter's
    # 'networkFolder' property.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.Datacenter,
        path='networkFolder',
        skip=False)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and blocks
    until the reconfiguration task completes.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfiguration task as done.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether network I/O control (NIOC) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Return distributed virtual portgroups (dvportgroups).

    parent_ref
        The parent object reference. Can be either a datacenter or a
        distributed virtual switch.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises ``ArgumentValueError`` for any other parent type.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Datacenter parent: traverse networkFolder -> childEntity.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # DVS parent: portgroups hang directly off the 'portgroup' property.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Return the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference

    Raises ``VMwareObjectRetrievalError`` when no uplink portgroup exists.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        # The uplink portgroup carries the system tag 'SYSTEM/DVS.UPLINKPG'.
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and blocks until the creation task completes.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the creation task as done.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and blocks until the
    reconfiguration task completes.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on runtime faults.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfiguration task as done.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and blocks until the destroy
    task completes.

    portgroup_ref
        The portgroup reference

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on runtime faults.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the destroy task as done.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Return the standard-switch networks under a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The names of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicating whether to return all networks in the parent.
        Default is False.

    Raises ``ArgumentValueError`` when the parent is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to find networks.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Return a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    props = ['name'] if properties is None else properties
    return [entry['name'] for entry in
            get_mors_with_properties(service_instance, vim_object, props)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on runtime faults.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises ``VMwareApiError`` on permission/API faults,
    ``VMwareRuntimeError`` on runtime faults, and
    ``VMwareObjectRetrievalError`` when the manager is not available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license and returns the resulting license object.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The label makes the description visible in the vSphere client UI.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity_ref is not
    provided, the entity is assumed to be the vCenter itself, and
    entity_name is validated against the assignment's display name.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ``ArgumentValueError`` if entity_name is missing,
    ``VMwareApiError``/``VMwareRuntimeError`` on API faults, and
    ``VMwareObjectRetrievalError`` on unexpected assignment results.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, the assignment is queried against the
    # vCenter itself, identified by its instance uuid.
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    if entity_type == 'uuid' and len(assignments) > 1:
        # Fixed typo in the log message ('Unexpectectedly' -> 'Unexpectedly').
        log.trace('Unexpectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the resulting license object.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on runtime faults.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter: identified by its instance uuid
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Added log.exception for consistency with the other API
            # wrappers in this module.
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Return the datacenters in a vCenter, optionally filtered by name.

    service_instance
        The Service Instance Object from which to obtain datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Return a vim.Datacenter managed object by name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name

    Raises ``VMwareObjectRetrievalError`` when no datacenter matches.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder and returns the
    new datacenter object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on runtime faults.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Return a cluster from a datacenter by name.

    dc_ref
        The datacenter reference

    cluster
        The name of the cluster to be retrieved

    Raises ``VMwareObjectRetrievalError`` when the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Traverse datacenter -> hostFolder -> childEntity to reach clusters.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    matches = [entry['object'] for entry in
               get_mors_with_properties(si,
                                        vim.ClusterComputeResource,
                                        container_ref=dc_ref,
                                        property_list=['name'],
                                        traversal_spec=traversal_spec)
               if entry['name'] == cluster]
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on runtime faults.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster and blocks until the reconfiguration task completes.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on runtime faults.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True applies the spec as an incremental change rather
        # than a full replacement.
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster (storage pod) names associated with
    a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastore names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Return a dict mapping each datastore name to its basic information:
    name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {name: list_datastore_full(service_instance, name)
            for name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity (MiB), free (MiB), used (MiB), usage (percent),
    hosts (list of host names).

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises ``VMwareObjectRetrievalError`` when the datastore doesn't exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )

    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Sizes are reported by the API in bytes; convert to MiB.
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against division by zero for datastores reporting 0 capacity
    # (e.g. inaccessible/unmounted datastores).
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0

    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies as 'vim.HostSystem:host-123'; extract the moid.
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)

    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Return a reference to the first object of the specified type with the
    specified name, or None when no match exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((item for item in container.view if item.name == obj_name),
                None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Return a reference to the first object of the specified type with the
    specified managed object id, or None when no match exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((item for item in container.view if item._moId == obj_moid),
                None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    # Kick off one datastore-browser search task per matching datastore
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        # NoPermission is handled first so the raised error can name the
        # missing privilege; broader faults are mapped to salt exceptions
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        # A datastore on which the directory does not exist is skipped
        # rather than failing the whole search
        except salt.exceptions.VMwareFileNotFoundError:
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.

    raises
        ArgumentValueError when a backing disk filter is used with a
        reference that is not a vim.HostSystem, or when the reference type
        is not supported.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
    if backing_disk_ids and not isinstance(reference, vim.HostSystem):
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\' when backing disk filter '
            'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Merge the disk-derived names into the caller-supplied name filter
        # NOTE(review): extend() mutates the caller's datastore_names list
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    # Retrieve all visible datastore names, then filter locally
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    # NoPermission is handled first so the raised error can name the
    # missing privilege; broader vCenter faults map to salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system

    service_instance
        The Service Instance Object used to query the storage system.

    host_ref
        The vim.HostSystem whose storage system is retrieved.

    hostname
        Name of the host; looked up from host_ref when not given.

    raises
        VMwareObjectRetrievalError when the storage system is not found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The vim.HostStorageSystem to query.

    device_path
        Path of the disk device whose partition info is retrieved.
    '''
    try:
        # A single device path is requested, so only one info is returned
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem of the host the disk belongs to.

    device_path
        The device path of the disk the partition is added to.

    partition_info
        The current vim.HostDiskPartitionInfo of the disk.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition at the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed: logging uses %-style lazy interpolation; the previous '{0}'
    # placeholder was passed as an unused argument and never substituted
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        # NOTE(review): the analogous "not found" case above raises
        # VMwareObjectNotFoundError; the type here is kept unchanged so
        # callers catching VMwareNotFoundError are not broken -- confirm
        # it exists in salt.exceptions
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem; retrieved from host_ref when
        not provided. This argument is optional.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a spec that fills the remaining free space with a vmfs partition
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    raises
        VMwareObjectRetrievalError when the datastore system is not found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore via the datastore system of one of its attached
    hosts

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    raises
        VMwareApiError when the datastore has no attached hosts or the
        removal fails.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # The removal is issued through the first attached host's datastore system
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.

    raises
        ArgumentValueError when cluster_name is given without
        datacenter_name.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                # Only hosts parented by a cluster can match the filter
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    raises
        VMwareObjectRetrievalError when the storage device, multipath or
        lun info is missing.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    raises
        VMwareObjectRetrievalError when the storage system or its device
        info cannot be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # Build scsi_address -> lun key, then resolve each key to the lun object
    lun_ids_to_scsi_addr_map = \
        _get_scsi_address_to_lun_key_map(si, host_ref, storage_system,
                                         hostname)
    luns_to_key_map = {d.key: d for d in
                       get_all_luns(host_ref, storage_system, hostname)}
    return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in
            six.iteritems(lun_ids_to_scsi_addr_map)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # Nothing to filter by; no disks can match
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.

    raises
        VMwareObjectRetrievalError when no devices or no matching disk is
        found on the host.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Keep only the scsi disk matching the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk of an ESXi host

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.

    raises
        VMwareObjectRetrievalError when no devices or no matching disk is
        found on the host.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Keep only the scsi disk matching the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their canonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.

    raises
        VMwareObjectRetrievalError when the vsan host config or its storage
        info is missing.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # No filter given; no disk group can match
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by the canonical name of its cache (ssd) disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    # The disk group must be fronted by exactly the expected cache disk
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # Compare the capacity (non-ssd) disks as order-insensitive sets of ids
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Traverse from the host to its cache configuration manager and
        # fetch the configuration info in one query
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        # Manager already provided by the caller; query it directly
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in mebibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Configuration is asynchronous; block until the task completes
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns the hosts associated with the given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    host_list = list_objects(service_instance, vim.HostSystem)
    return host_list
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        List of resource pool names to retrieve; may be None or empty when
        ``get_all_resource_pools`` is True.

    datacenter_name
        Name of the datacenter where the resource pool is available.
        Default is None, in which case the search starts at the root folder.

    get_all_resource_pools
        Boolean; when True every resource pool under the container is
        returned regardless of ``resource_pool_names``.

    return
        List of vim.ResourcePool managed object references.

    Raises VMwareObjectRetrievalError when no pool matched.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the *requested* names -- the previous code formatted
        # selected_pools, which is always [] here, making the error useless.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns the resource pools associated with the given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    pools = list_objects(service_instance, vim.ResourcePool)
    return pools
def list_networks(service_instance):
    '''
    Returns the networks associated with the given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    networks = list_objects(service_instance, vim.Network)
    return networks
def list_vms(service_instance):
    '''
    Returns the virtual machines associated with the given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vms = list_objects(service_instance, vim.VirtualMachine)
    return vms
def list_folders(service_instance):
    '''
    Returns the folders associated with the given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    folders = list_objects(service_instance, vim.Folder)
    return folders
def list_dvs(service_instance):
    '''
    Returns the distributed virtual switches associated with the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        switches.
    '''
    switches = list_objects(service_instance, vim.DistributedVirtualSwitch)
    return switches
def list_vapps(service_instance):
    '''
    Returns the vApps associated with the given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vapps = list_objects(service_instance, vim.VirtualApp)
    return vapps
def list_portgroups(service_instance):
    '''
    Returns the distributed virtual portgroups associated with the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        portgroups.
    '''
    portgroups = list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
    return portgroups
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        Controls how often the 'waiting' message is logged (every
        ``sleep_seconds`` iterations); the actual poll interval is always
        about one second (see the time.sleep call below). Defaults to ``1``.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.

    Returns the task result on success; on failure, re-raises the task's
    stored error translated onto salt's VMware exception hierarchy.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Reading task.info performs a server round-trip and can itself fault.
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep to the next whole second since start_time so polls stay
        # aligned to ~1s boundaries regardless of how long task.info took.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored error so known
        # fault types can be translated onto salt's exception hierarchy
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first localized fault message when one is present.
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name; used as the search container when ``parent_ref``
        is not supplied. Default is None.

    vm_properties
        List of vm properties. A default set covering hardware, storage and
        runtime state is used when omitted.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError when no VM matches ``name`` and
    VMwareMultipleObjectsError when more than one does.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # The two fragments were previously adjacent string literals
        # (implicit concatenation) inside the list, producing 'with thesame
        # name'; keeping them as separate list items lets ' '.join insert
        # the missing space.
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    The folder is resolved, in order of precedence, from the base VM's
    parent, from ``placement['folder']``, or from the datacenter's vmFolder.
    Raises VMwareObjectRetrievalError when none of those sources is
    available.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Cloning: reuse the folder the source VM lives in.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vmFolder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this fell through and hit an UnboundLocalError on the
        # return below; fail with an explicit, diagnosable error instead.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The folder could not be determined: no base VM name, folder '
            'placement or datacenter was provided')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        The service instance object used to query the vCenter

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info: cluster, host or resource pool
        name. Default is None, which is treated as an empty placement and
        raises VMwareObjectRetrievalError.

    return
        Tuple (resource pool object, placement object); the placement object
        is the host or cluster, if any applies.
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if placement is None:
        # ``'host' in None`` would raise TypeError; normalize so the
        # explicit 'Placement is not defined' error below is raised instead.
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            # Try the host's own 'resourcePool' property first.
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The host doesn't expose 'resourcePool' directly; traverse to
            # the parent cluster's resource pool instead.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # The previous message formatted placement['host'], which raised
            # KeyError in this branch; report the requested resource pool.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Convert ``size`` expressed in ``unit`` to kilobytes.

    unit
        Unit of the size, e.g. ``GB``; Note: to VMware a GB is the same as
        GiB = 1024 MiB.

    size
        Number which represents the size.

    Returns a dict of the form ``{'size': <int>, 'unit': 'KB'}``.
    '''
    normalized_unit = unit.lower()
    if normalized_unit == 'gb':
        # vCenter needs long value
        kb_size = int(size * 1024 * 1024)
    elif normalized_unit == 'mb':
        kb_size = int(size * 1024)
    elif normalized_unit == 'kb':
        kb_size = int(size)
    else:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': kb_size, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers a virtual machine on or off.

    virtual_machine
        vim.VirtualMachine object on which the power operation is performed.

    action
        Power operation to perform: ``on`` (default) or ``off``.

    Returns the vim.VirtualMachine object once the power task completed.
    '''
    # Map the requested action onto the pyVmomi power method.
    if action == 'on':
        operation, task_name = 'PowerOn', 'power on'
    elif action == 'off':
        operation, task_name = 'PowerOff', 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = getattr(virtual_machine, operation)()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from a config spec.

    vm_name
        Name of the virtual machine to be created.

    vm_config_spec
        Virtual Machine Config Spec object.

    folder_object
        vm Folder managed object reference under which the VM is created.

    resourcepool_object
        Resource pool object where the machine will be created.

    host_object
        Host object where the machine will be placed (optional).

    return
        Virtual Machine managed object reference
    '''
    # Only pass a host when a genuine vim.HostSystem was supplied.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object
        (optional)

    Raises VMwareVmRegisterError if the configuration file could not be
    found, and VMwareApiError / VMwareRuntimeError on API faults.
    '''
    try:
        # Register under the datacenter's vmFolder as a regular VM, not a
        # template (asTemplate=False).
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # A missing .vmx file surfaces as a FileNotFound fault from the
        # task; convert it into a registration-specific error.
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Reconfigures the virtual machine with the given config spec.

    vm_ref
        Virtual machine managed object reference.

    vm_config_spec
        Virtual machine config spec object to apply.

    Returns the result of the reconfigure task.
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(reconfig_task, name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the given virtual machine.

    vm_ref
        Managed object reference of the virtual machine to destroy.
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory.

    Unlike ``delete_vm``, this only removes the inventory entry; the virtual
    machine's files are left untouched.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # The previous trace message said 'Destroying' (copy-paste from
    # delete_vm); this call only unregisters.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, consistent with every other API wrapper in
        # this module.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
update_dvs
|
python
|
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.

    Raises VMwareApiError / VMwareRuntimeError on API faults; blocks until
    the reconfigure task completes.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # The task's class name is used as the task-type label in wait/log
    # messages.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
|
Updates a distributed virtual switch with the config_spec.
dvs_ref
The DVS reference.
dvs_config_spec
The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
the DVS.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1155-L1181
|
[
"def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):\n '''\n Waits for a task to be completed.\n\n task\n The task to wait for.\n\n instance_name\n The name of the ESXi host, vCenter Server, or Virtual Machine that\n the task is being run on.\n\n task_type\n The type of task being performed. Useful information for debugging purposes.\n\n sleep_seconds\n The number of seconds to wait before querying the task again.\n Defaults to ``1`` second.\n\n log_level\n The level at which to log task information. Default is ``debug``,\n but ``info`` is also supported.\n '''\n time_counter = 0\n start_time = time.time()\n log.trace('task = %s, task_type = %s', task, task.__class__.__name__)\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n while task_info.state == 'running' or task_info.state == 'queued':\n if time_counter % sleep_seconds == 0:\n msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n time.sleep(1.0 - ((time.time() - start_time) % 1.0))\n time_counter += 1\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. 
Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n if task_info.state == 'success':\n msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n # task is in a successful state\n return task_info.result\n else:\n # task is in an error state\n try:\n raise task_info.error\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.fault.SystemError as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareSystemError(exc.msg)\n except vmodl.fault.InvalidArgument as exc:\n log.exception(exc)\n exc_message = exc.msg\n if exc.faultMessage:\n exc_message = '{0} ({1})'.format(exc_message,\n exc.faultMessage[0].message)\n raise salt.exceptions.VMwareApiError(exc_message)\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param protocol: Connection protocol; defaults to ``https``
    :param port: TCP port; defaults to ``443``
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary with the command result (as returned by
             ``cmdmod.run_all``), or False if the esxcli binary is missing
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    # NOTE(review): host, user, pwd, cmd, etc. are interpolated straight
    # into a shell command string; a value containing a single quote could
    # break out of the quoting (command-injection risk). A list-based
    # invocation with shell=False would be safer -- TODO confirm callers
    # only pass trusted values.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Proxy through vCenter: -h selects the target ESXi host.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    # output_loglevel='quiet' presumably keeps the command (which contains
    # the password) out of the logs -- verify against cmdmod semantics.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        Username; mandatory for the ``userpass`` mechanism.

    password
        Password; mandatory for the ``userpass`` mechanism.

    protocol
        Connection protocol, passed through to SmartConnect.

    port
        TCP port, passed through to SmartConnect.

    mechanism
        Authentication mechanism: ``userpass`` or ``sspi``.

    principal
        Kerberos service principal; mandatory for ``sspi``.

    domain
        Kerberos user domain; mandatory for ``sspi``.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:  # pylint: disable=broad-except
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Python 3 exceptions have no ``.message`` attribute; stringify the
        # exception instead (the old ``exc.message`` access itself raised
        # AttributeError here on Python 3).
        exc_text = six.text_type(exc)
        if 'unexpected keyword argument' in exc_text:
            log.error('Initial connect to the VMware endpoint failed with %s', exc_text)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
        # Always re-raise: previously an unrelated TypeError was silently
        # swallowed, leaving service_instance unbound and causing a
        # confusing UnboundLocalError further down.
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # Retry without certificate verification. NOTE: both getattr
                # arguments are evaluated eagerly; this only works because
                # _create_stdlib_context exists wherever
                # _create_unverified_context is missing -- TODO confirm.
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: explicit TLSv1 context with verification
                # disabled. TLSv1 is deprecated; kept for compatibility with
                # old endpoints.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:  # pylint: disable=broad-except
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is cleanly disconnected at interpreter exit.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Look the spec up by name on the vCenter's customization spec manager.
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get a reference to an object of specified object type and name.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the matching managed object reference, or None if no object
    with that name was found.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Destroy the server-side container view so it is not leaked; the
        # original implementation never released it (get_content in this
        # module destroys its views the same way).
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Reuse pyVmomi's process-wide cached service instance when it still
    # points at the same host:port; otherwise disconnect and reconnect.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
                (hasattr(stub, 'host') and
                 stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Cached session expired on the server side: reconnect once.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        # Translate pyVmomi faults into salt exceptions for callers.
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # Python 2.7.9+ enforces strict SSL handshaking by default; turn off
    # hostname checking and client-side certificate verification so the new
    # stub connects the same way the existing one did.
    ssl_context = None
    if sys.version_info[:3] > (2, 7, 8):
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE

    old_stub = service_instance._stub
    hostname = old_stub.host.split(':')[0]
    # The session cookie is the quoted portion of the existing stub's cookie;
    # propagate it so the new stub shares the authenticated session.
    session_cookie = old_stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=ssl_context)
    new_stub.cookie = old_stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # A ServiceInstance sharing the managed object's SOAP stub talks to the
    # same endpoint over the same authenticated session.
    service_instance = vim.ServiceInstance('ServiceInstance')
    service_instance._stub = mo_ref._stub
    return service_instance
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises ``salt.exceptions.VMwareApiError`` on vim faults and
    ``salt.exceptions.VMwareRuntimeError`` on vmodl runtime faults.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        # Translate pyVmomi faults into salt exceptions so callers only need
        # to handle salt.exceptions types.
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' identifies a vCenter Server, 'HostAgent' a standalone
    # ESXi host; any other value is unexpected and raised as an API error.
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Returns the object exposed at ``content.about``.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        # Translate pyVmomi faults into salt exceptions for callers.
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None if no DVS with that name exists
    '''
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(
            inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        try:
            for item in container.view:
                if item.name == dvs_name:
                    return item
        finally:
            # Release the server-side container view; the original code
            # leaked it on every call (get_content destroys its views).
            container.Destroy()
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Returns the base64-encoded token. Raises ImportError when the gssapi
    library is unavailable and CommandExecutionError when no token could
    be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    # NOTE(review): in_token is never updated inside this loop, so at most
    # one step is performed: the first out_token is returned base64-encoded,
    # and if no token is produced the 'no response' error is raised below.
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    # Reached only if the context established without ever emitting a token.
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only a direct ESXi (HostAgent) connection exposes the host hardware;
    # a vCenter connection returns an empty dict.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the (single) HostSystem of this ESXi host.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grains report MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Collect per-VNIC addressing; IPv6 entries only when configured.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # FQDN is host + '.' + domain, omitting the dot when no domain.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    # RetrieveContent returns the service content (inventory root) object.
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises ``salt.exceptions.VMwareApiError`` on vim faults and
    ``salt.exceptions.VMwareRuntimeError`` on vmodl runtime faults.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        # Translate pyVmomi faults into salt exceptions for callers.
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view
    # (only the container view created above is ours to release; a
    # caller-supplied traversal spec means we created no view)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Fetch every managed object of the requested type together with the
    # single property we filter on.
    candidates = get_mors_with_properties(service_instance, object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # The stringified MOR looks like 'vim.Type:id'; strip the quotes so
        # callers may match either the property value or the object id.
        candidate_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if property_value in (candidate[property_name], candidate_id):
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    retrieval_args = [service_instance, object_type]
    retrieval_kwargs = {'property_list': property_list,
                        'container_ref': container_ref,
                        'traversal_spec': traversal_spec,
                        'local_properties': local_properties}
    # A stale keep-alive connection can surface as BadStatusLine or a broken
    # pipe (EPIPE); retry the retrieval once in those cases.
    try:
        content = get_content(*retrieval_args, **retrieval_kwargs)
    except BadStatusLine:
        content = get_content(*retrieval_args, **retrieval_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*retrieval_args, **retrieval_kwargs)

    # Flatten each ObjectContent into a dict of property name -> value, plus
    # the managed object reference itself under the 'object' key.
    results = []
    for obj_content in content:
        entry = dict((prop.name, prop.val) for prop in obj_content.propSet)
        entry['object'] = obj_content.obj
        results.append(entry)
    log.trace('Retrieved %s objects', len(results))
    return results
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # Best-effort lookup of the object's name; only used in log/error text.
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    entries = get_mors_with_properties(service_instance,
                                       type(mo_ref),
                                       container_ref=mo_ref,
                                       property_list=properties,
                                       local_properties=True)
    if not entries:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return entries[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.
    If the name wasn't found, it returns None.

    mo_ref
        The managed object reference.
    '''
    # Retrieve just the 'name' property and extract it from the result dict.
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return the network adapter type.

    adapter_type
        The adapter type from which to obtain the network adapter type.
    '''
    # Map the adapter type name to its pyVmomi device class and instantiate.
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type.

    adapter_object
        The adapter object from which to obtain the network adapter type.
    '''
    # Order mirrors the original isinstance chain: vmxnet2/vmxnet3 are
    # tested before vmxnet, and e1000e before e1000 — presumably because of
    # subclassing in the pyVmomi device hierarchy (keep this order).
    type_checks = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for device_class, type_name in type_checks:
        if isinstance(adapter_object, device_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs.
    folder_traversal = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_traversal])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises VMwareObjectRetrievalError when the folder cannot be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Follow the datacenter's 'networkFolder' property to the folder object.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folder_entries = get_mors_with_properties(service_instance,
                                              vim.Folder,
                                              container_ref=dc_ref,
                                              property_list=['name'],
                                              traversal_spec=traversal_spec)
    if not folder_entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folder_entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switches (DVS) in a datacenter.
    Returns the reference to the newly created distributed virtual switch.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Build a minimal create spec carrying the DVS name when none is given.
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    # DVSs are created inside the datacenter's network folder.
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the create task finished.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        # Translate pyVmomi faults into salt exceptions for callers.
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # datacenter -> networkFolder -> childEntity reaches the portgroups.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # Parent is a distributed virtual switch: walk its 'portgroup' list.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference

    Raises VMwareObjectRetrievalError when no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by the 'SYSTEM/DVS.UPLINKPG' tag.
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            return entry['object']
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs)

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the create task finished.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfigure task finished.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the destroy task finished.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> children to reach the networks
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity', skip=False, type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder', skip=True, type=vim.Datacenter,
        selectSet=[child_spec])
    results = get_mors_with_properties(service_instance,
                                       vim.Network,
                                       container_ref=parent_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    networks = []
    for entry in results:
        if get_all_networks or (network_names and
                                entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of objects from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Only the 'name' of each retrieved managed object is returned
    # (the docstring previously referred to a non-existent 'object_type' param)
    return [item['name']
            for item in get_mors_with_properties(service_instance, vim_object,
                                                 properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    if not assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        licenses = license_manager.licenses
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return licenses
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # Attach the client-visible label carrying the description
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        added_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return added_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    # entity_name is mandatory; validate it unconditionally. Previously this
    # check was accidentally nested under the manager retrieval above, so it
    # was skipped whenever a license_assignment_manager was passed in.
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Querying by the vCenter uuid must return exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Added log.exception for consistency with the sibling handlers
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    datacenter_names = list_objects(service_instance, vim.Datacenter)
    return datacenter_names
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if matches:
        return matches[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        datacenter = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return datacenter
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Traverse datacenter -> hostFolder -> children to reach the clusters
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity', skip=False, type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder', skip=True, type=vim.Datacenter,
        selectSet=[child_spec])
    matches = [entry['object'] for entry in
               get_mors_with_properties(si,
                                        vim.ClusterComputeResource,
                                        container_ref=dc_ref,
                                        property_list=['name'],
                                        traversal_spec=traversal_spec)
               if entry['name'] == cluster]
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        reconfig_task = cluster_ref.ReconfigureComputeResource_Task(
            cluster_spec, modify=True)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(reconfig_task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    cluster_names = list_objects(service_instance, vim.ClusterComputeResource)
    return cluster_names
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    # Datastore clusters are modeled as vim.StoragePod objects
    pod_names = list_objects(service_instance, vim.StoragePod)
    return pod_names
def list_datastores(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastore_names = list_objects(service_instance, vim.Datastore)
    return datastore_names
def list_datastores_full(service_instance):
    '''
    Returns a dict of datastores associated with a given service instance,
    keyed by datastore name. Each value contains basic information about the
    datastore: name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # Dict comprehension replaces the manual build loop (same result)
    return {datastore: list_datastore_full(service_instance, datastore)
            for datastore in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    # str() + quote-stripping normalizes pyVmomi string reprs that may
    # include surrounding single quotes
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Sizes converted from bytes to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # NOTE(review): raises ZeroDivisionError if capacity is 0 — confirm
    # whether unavailable/zero-capacity datastores can reach this point
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies as "'vim.HostSystem:host-123'"; take the part
        # after the first ':' to obtain the managed object id
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Return the first object whose name matches, or None if absent
    matches = (obj for obj in container.view if obj.name == obj_name)
    return next(matches, None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Return the first object whose managed object id matches, or None
    matches = (obj for obj in container.view if obj._moId == obj_moid)
    return next(matches, None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    results = []
    for ds_obj in get_datastores(service_instance, container_object,
                                 datastore_names=datastores):
        try:
            search_task = ds_obj.browser.SearchDatastore_Task(
                datastorePath='[{}] {}'.format(ds_obj.name, directory),
                searchSpec=browser_spec)
        except vim.fault.NoPermission as err:
            log.exception(err)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(err.privilegeId))
        except vim.fault.VimFault as err:
            log.exception(err)
            raise salt.exceptions.VMwareApiError(err.msg)
        except vmodl.RuntimeFault as err:
            log.exception(err)
            raise salt.exceptions.VMwareRuntimeError(err.msg)
        try:
            results.append(salt.utils.vmware.wait_for_task(
                search_task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore without the directory simply contributes no results
            pass
    return results
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.
        NOTE(review): when backing_disk_ids is also given, this list is
        extended in place — confirm callers don't rely on it being unmodified.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        # Backing-disk filtering needs a host's storage system, so the
        # reference must be a host when the filter is set
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Merge the disk-derived names into the requested names
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        # Nothing to look for: no names requested and no disks matched
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    # Apply the name filter after retrieval
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system

    service_instance
        The Service Instance Object

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host; retrieved from the reference if omitted.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return results[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo
    '''
    try:
        infos = storage_system.RetrieveDiskPartitionInfo(
            devicePath=[device_path])
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Only a single device path was queried, so a single entry comes back
    log.trace('partition_info = %s', infos[0])
    return infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed: the previous call used a str.format-style '{0}' placeholder with
    # logging's %-style lazy arguments, which logged the literal '{0}'
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        Optional pre-fetched vim.HostStorageSystem; retrieved from the host
        if not provided.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a spec that turns the disk's remaining free space into a
    # new vmfs partition
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return results[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore via the datastore system of its first attached host.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Perform the removal through the first attached host
    first_host = ds_hosts[0].key
    hostname = get_managed_object_name(first_host)
    host_ds_system = get_host_datastore_system(first_host,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    props = ['name']
    if datacenter_name:
        container = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # 'parent' is needed to test cluster membership below
            props.append('parent')
    else:
        # No datacenter given: search from the inventory root
        container = get_root_folder(service_instance)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostSystem,
                                       container_ref=container,
                                       property_list=props)
    log.trace('Retrieved hosts: %s', [e['name'] for e in entries])
    selected = []
    for entry in entries:
        if cluster_name:
            # Only hosts whose parent is the requested cluster qualify
            parent = entry['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or entry['name'] in host_names:
            selected.append(entry['object'])
    return selected
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.

        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None (retrieved here).

    hostname
        Name of the host. Default is None (retrieved here).

    Raises ``VMwareObjectRetrievalError`` when the storage device info,
    multipath info, or lun list cannot be obtained.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns the list of all scsi luns (vim.ScsiLun objects) on an ESXi host,
    or an empty list when the host reports none.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None (retrieved here).

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Builds a map of all vim.ScsiLun objects on an ESXi host, keyed by their
    scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = storage_system or get_storage_system(si, host_ref,
                                                          hostname)
    # scsi address -> lun key
    key_by_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                   storage_system, hostname)
    # lun key -> lun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Join the two maps: scsi address -> lun object
    addr_to_lun = {}
    for addr, key in six.iteritems(key_by_addr):
        addr_to_lun[addr] = lun_by_key[key]
    return addr_to_lun
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # No filters and not asking for everything: nothing can match
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    scsi_luns = get_all_luns(host_ref, storage_system)
    # A lun qualifies if it is a disk and passes any of the active filters
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk.

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None (retrieved here).

    Raises ``VMwareObjectRetrievalError`` if no devices are visible on the
    host or the requested disk is not among them.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the requested disk among the host's scsi luns
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None (retrieved here).

    storage_system
        The ESXi host's storage system. Default is None (retrieved here).
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the requested disk among the host's scsi luns
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their canonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # No cache disk filter and not asking for everything: nothing matches
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # Each disk group is identified by its (single) ssd cache disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that a disk group contains exactly the expected cache disk and
    capacity disks; raises ``ArgumentValueError`` on any mismatch, returns
    ``True`` on success.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # Capacity disks are compared order-insensitively
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids,
                      expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the first cache configuration entry if the host cache is
    configured on the specified host, otherwise returns None.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # No manager supplied: traverse from the host to its cache
        # configuration manager and fetch the config in one query
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host. Returns True on success.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore opject representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the configuration task completes
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    # Thin wrapper around the generic lister, scoped to vim.HostSystem
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean - if True, return all resource pools in the container

    return
        List of vim.ResourcePool managed object references

    Raises ``VMwareObjectRetrievalError`` if no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Scope the search to the datacenter if one was given, otherwise to the
    # inventory root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the names that were requested. The original formatted the
        # (necessarily empty) result list here, so the message always read
        # 'names=[]' regardless of what was asked for.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    # Thin wrapper around the generic lister, scoped to vim.ResourcePool
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    # Thin wrapper around the generic lister, scoped to vim.Network
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    # Thin wrapper around the generic lister, scoped to vim.VirtualMachine
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    # Thin wrapper around the generic lister, scoped to vim.Folder
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    # Thin wrapper around the generic lister, scoped to DVS objects
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    # Thin wrapper around the generic lister, scoped to vim.VirtualApp
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    # Thin wrapper around the generic lister, scoped to portgroup objects
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed. Returns the task result on success;
    on failure the task error is re-raised and translated into the
    corresponding salt VMware exception.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll once a second until the task leaves the running/queued states;
    # progress is only logged every `sleep_seconds` iterations
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary relative to start_time,
        # so the poll cadence stays aligned regardless of query latency
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; raise the stored fault so the except
        # clauses below can map it onto the matching salt exception type
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises ``VMwareObjectRetrievalError`` if no VM matches the name and
    ``VMwareMultipleObjectsError`` if more than one does.
    '''
    if datacenter and not parent_ref:
        # Scope the search to the given datacenter
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set commonly needed by callers
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Fixed: the original list held two adjacent string literals with no
        # comma, which Python concatenates, yielding 'with thesame name' in
        # the message.
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the same name, '
            'please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises ``VMwareObjectRetrievalError`` when the folder cannot be
    determined from any of the supplied arguments.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Cloning: reuse the folder of the base virtual machine
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Fixed: previously this case fell through and the return statement
        # raised UnboundLocalError on the unassigned folder_object
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The folder could not be determined: no base virtual machine, '
            'folder placement or datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    # Treat a missing placement dict like an empty one so the schema error
    # below is raised instead of a TypeError on the membership tests
    placement = placement or {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The host does not expose 'resourcePool' directly; look it up
            # through the parent compute resource instead
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: the original formatted placement['host'] here, raising
            # KeyError when no host was given and naming the wrong object
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit. Returns a dict of the
    form ``{'size': <int size in KB>, 'unit': 'KB'}`` (the original docstring
    incorrectly claimed a bare integer was returned).

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    Raises ``ArgumentValueError`` when the unit is not one of GB, MB or KB
    (case-insensitive).
    '''
    # Multipliers from the given unit to kibibytes
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    try:
        multiplier = multipliers[unit.lower()]
    except KeyError:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    # vCenter needs an integer (long) value
    return {'size': int(size * multiplier), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine and waits for the operation to complete.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine ('on' or 'off')

    Returns the virtual machine object on success. Raises
    ``ArgumentValueError`` for an unsupported action and translates vSphere
    faults into the corresponding salt VMware exceptions.
    '''
    # Dispatch on the action once; the fault handling below is identical for
    # both operations (the original duplicated the whole try/except cascade)
    if action == 'on':
        power_method = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_method = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_method()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from a config spec and waits for the
    creation task to finish.

    vm_name
        Virtual machine name, used to identify the task in logs/waits.

    vm_config_spec
        Virtual Machine Config Spec object.

    folder_object
        vm Folder managed object reference in which the VM is created.

    resourcepool_object
        Resource pool object where the machine will be created.

    host_object
        Host object where the machine will be placed (optional).

    return
        Virtual Machine managed object reference.
    '''
    try:
        # Only pass ``host`` when an actual HostSystem was supplied;
        # otherwise leave placement within the resource pool to vCenter.
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the task finishes; returns the created VM reference.
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)
    '''
    try:
        # Register inside the datacenter's vmFolder; pass the host only
        # when an explicit placement host was supplied.
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # A missing .vmx (or related file) is reported as a distinct
        # registration error type.
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update

    Returns the virtual machine managed object reference once the
    reconfigure task has completed.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfigure completes.
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine via ``Destroy_Task`` and blocks until the
    task completes.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters a virtual machine from the inventory.

    Unlike ``delete_vm`` this only removes the VM from the inventory via
    ``UnregisterVM``; it does not destroy the VM.

    vm_ref
        Managed object reference of a virtual machine object.
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed copy-pasted message: this call unregisters, it does not destroy.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before re-raising, consistent with the sibling helpers.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
set_dvs_network_resource_management_enabled
|
python
|
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether network I/O control (NIOC) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
Sets whether NIOC is enabled on a DVS.
dvs_ref
The DVS reference.
enabled
Flag specifying whether NIOC is enabled.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1184-L1209
|
[
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary (cmd.run_all result), or False when the esxcli
             binary is not available on PATH.
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    # NOTE(review): the password is interpolated into a shell command string;
    # output_loglevel='quiet' below keeps it out of the logs, but a list-based
    # (non-shell) invocation would be safer — confirm before changing.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Supports the ``userpass`` (username/password) and ``sspi`` (Kerberos
    token) mechanisms; validates that the parameters required by the chosen
    mechanism were supplied before attempting the connection.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Older pyVmomi versions do not accept the b64token/mechanism kwargs.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)

        try:
            # First fallback: on certificate verification failures, retry once
            # with an unverified SSL context.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Second fallback: explicit TLSv1 context with certificate
                # verification disabled.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is cleaned up when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Look up a VMware guest customization spec by name, for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec to retrieve
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get a reference to the first inventory object of the specified type with
    the specified name, using a recursive container view from the root folder.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns None when no object matches.
    '''
    content = get_inventory(si)
    view = content.viewManager.CreateContainerView(content.rootFolder, [obj_type], True)
    return next((entry for entry in view.view if entry.name == obj_name), None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Reuse pyVim's cached service instance when it targets the same host.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale cached session: drop it and establish a fresh connection.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    # Propagate the existing session cookie so the new endpoint is reached
    # with the already-authenticated vCenter session.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used for logging. This field is optional.
        NOTE(review): the default '<unnamed>' is truthy, so the fallback
        below only triggers when an empty name is passed explicitly —
        confirm that is intended.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a fresh ServiceInstance that reuses the managed object's SOAP
    # stub (and therefore its authenticated session).
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises VMwareApiError/VMwareRuntimeError on API faults.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises VMwareApiError if the reported api type is neither
    'VirtualCenter' nor 'HostAgent'.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' identifies vCenter; 'HostAgent' a standalone ESXi host.
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Returns the endpoint's ``content.about`` info object.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object, or None when
    no switch with the given name exists.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object or None
    '''
    if dvs_name not in list_dvs(service_instance):
        return None
    content = get_inventory(service_instance)
    view = content.viewManager.CreateContainerView(content.rootFolder, [vim.DistributedVirtualSwitch], True)
    return next((switch for switch in view.view if switch.name == dvs_name), None)
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    Returns the base64-encoded token produced by the first GSSAPI context
    step; raises ImportError when the gssapi library is unavailable and
    CommandExecutionError when no token could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            # The first outgoing token is returned immediately, so the loop
            # performs at most one productive iteration.
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): ``in_token`` is never reassigned inside the loop, so a
        # step that yields no token always raises here — the multi-step
        # negotiation path appears unimplemented; confirm.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only ESXi hosts (apiType 'HostAgent') expose the details harvested here.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the (single) HostSystem being managed.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize divided twice by 1024 — presumably bytes -> MiB;
            # confirm against the vim.host Hardware docs.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Virtual NICs: collect IPv4/IPv6 addresses and MACs per device.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # FQDN = hostName[.domainName]; the dot is omitted when no domain
            # is configured.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NICs contribute only MAC addresses.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the container view reference once done with it.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (service content) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises VMwareApiError/VMwareRuntimeError on API faults.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view — only if this function created it above.
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference whose given property matches
    the requested value, or whose stringified moid matches it.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value to match the property (or moid) against.

    property_name
        The object property used for matching. Defaults to ``name``.

    container_ref
        An optional managed object to search under. Defaults to the
        inventory rootFolder when not specified.
    '''
    # Retrieve every candidate along with the single property we match on
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        moid = six.text_type(candidate.get('object', '')).strip('\'"')
        if property_value in (candidate[property_name], moid):
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # The connection returned a malformed status line; retry once.
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        # Only a broken pipe is retried once; other IOErrors are re-raised.
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)
    # Flatten each result's propSet into a plain dict, keeping the managed
    # object reference itself under the 'object' key.
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way (a single local-property collector query).

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises a VMwareApiError when the properties couldn't be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # Best-effort retrieval of the object's name; it is only used for
    # logging and for the error message below.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the ``name`` property of a managed object, or None when the
    property wasn't found.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Returns a new virtual network adapter device of the requested type.

    adapter_type
        The adapter type name; one of ``vmxnet``, ``vmxnet2``, ``vmxnet3``,
        ``e1000`` or ``e1000e``.

    Raises ValueError for an unknown adapter type name.
    '''
    # Dispatch table maps the type name to its device class; the class is
    # instantiated on lookup so each call returns a fresh device object.
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    try:
        return adapter_classes[adapter_type]()
    except KeyError:
        raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the type name of a virtual network adapter device.

    adapter_object
        The adapter device object whose type name is resolved.

    Raises ValueError for an unknown adapter object type.
    '''
    # Subclasses are checked before their parents (vmxnet2/vmxnet3 before
    # vmxnet, e1000e before e1000) so the most specific name wins.
    ordered_types = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for device_cls, type_name in ordered_types:
        if isinstance(adapter_object, device_cls):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns the distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity', skip=False, type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder', skip=True, type=vim.Datacenter,
        selectSet=[child_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises VMwareObjectRetrievalError when the folder can't be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Only the datacenter's networkFolder property is traversed
    folder_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder', skip=False, type=vim.Datacenter)
    results = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=folder_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return results[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete.

    Note: despite earlier documentation claims, this function does not
    return the created DVS reference; it returns None.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a default spec named ``dvs_name``
        is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the creation task finished
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and waits for
    the reconfiguration task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfiguration task finished
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises ArgumentValueError for any other parent type.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    if isinstance(parent_ref, vim.Datacenter):
        # Datacenter parent: traverse networkFolder -> childEntity
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference

    Raises VMwareObjectRetrievalError when no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by a tag with key
    # 'SYSTEM/DVS.UPLINKPG' among the portgroup's tags.
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec); ``spec.name`` is the
        name of the portgroup to create.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the creation task finished
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message ('portgrouo' -> 'portgroup')
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfiguration task finished
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the destroy task finished
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns standard-switch networks under a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The names of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicating whether to return all networks in the parent.
        Default is False.

    Raises ArgumentValueError when parent_ref is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to reach networks
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity', skip=False, type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder', skip=True, type=vim.Datacenter,
        selectSet=[child_spec])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information
        (e.g. vim.Datacenter).

    properties
        An optional list of object properties to retrieve.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Only each object's 'name' property is surfaced in the returned list,
    # regardless of which extra properties were requested.
    return [item['name']
            for item in get_mors_with_properties(service_instance,
                                                 vim_object,
                                                 properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises VMwareApiError / VMwareRuntimeError on vim/vmodl faults.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises VMwareObjectRetrievalError when the assignment manager isn't
    available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # The attribute can be unset without raising; treat that as an error
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license and returns the license object reported by the API.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as a 'VpxClientLicenseLabel' key/value
    # label on the license.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity_ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging; required.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ArgumentValueError when entity_name is not given and
    VMwareObjectRetrievalError on inconsistent assignment results.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # entity_name is guaranteed non-empty here (checked above), so the
        # display-name verification below is always enabled in this branch.
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        # The vCenter is identified by its instance UUID rather than a moid
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid) query is expected to yield exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        # Guard against having queried a different vCenter than intended
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # No entity passed: the license is assigned to the vCenter itself,
        # identified by its instance UUID.
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log the fault before translating it, consistent with the
            # other API wrappers in this module (was previously missing).
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns the names of all datacenters for the given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns datacenters in a vCenter, optionally filtered by name.

    service_instance
        The Service Instance Object from which to obtain datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns the vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name

    Raises VMwareObjectRetrievalError when no datacenter matches.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder and returns the
    new datacenter object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The name of the cluster to be retrieved

    Raises VMwareObjectRetrievalError when the cluster isn't found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Traverse datacenter -> hostFolder -> childEntity to reach clusters
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity', skip=False, type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder', skip=True, type=vim.Datacenter,
        selectSet=[child_spec])
    matches = [entry['object'] for entry in
               get_mors_with_properties(si,
                                        vim.ClusterComputeResource,
                                        container_ref=dc_ref,
                                        property_list=['name'],
                                        traversal_spec=traversal_spec)
               if entry['name'] == cluster]
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx) to create the cluster
        with. This argument is required (it has no default).
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster and waits for the reconfiguration task to complete.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx) to apply; applied as a
        modification (modify=True) rather than a full replacement.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfiguration task finished
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns the names of all clusters for the given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns the names of all datastore clusters for the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns the names of all datastores for the given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dictionary of datastores associated with a given service
    instance, keyed by datastore name.

    Each value holds basic information about that datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # Build the per-datastore detail mapping with a dict comprehension
    # instead of a manual assignment loop.
    return {datastore: list_datastore_full(service_instance, datastore)
            for datastore in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError when the datastore doesn't exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    # str(...).replace("'", "") strips quote characters from the string
    # representations of the summary values.
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Sizes are converted from bytes to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Usage expressed as a percentage of total capacity
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []
    for host in datastore_object.host:
        # The moid is taken as everything after the first ':' in the
        # string form of host.key (e.g. 'vim.HostSystem:host-123')
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Gets a reference to the first object of the specified type and name,
    or None when no object matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((item for item in container.view if item.name == obj_name),
                None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Gets a reference to the first object of the specified type and moid,
    or None when no object matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((item for item in container.view if item._moId == obj_moid),
                None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files matching a given browser specification from the named
    datastores.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Names of the datastores to search

    container_object
        The base object for searches

    browser_spec
        vim.HostDatastoreBrowserSearchSpec object which defines the search
        criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects

    raises
        salt.exceptions.VMwareApiError / VMwareRuntimeError when the search
        task cannot be started.
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        # Kick off a datastore-browser search rooted at '[<ds>] <directory>'
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Datastores that don't contain the directory simply contribute
            # no results; this is deliberate best-effort behavior
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.
        NOTE(review): when ``backing_disk_ids`` is also passed, the caller's
        list is extended in place -- confirm callers don't rely on it staying
        unchanged.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all datastores visible from the
        reference. Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
    # The backing-disk filter needs host-level storage info, so it is only
    # valid when the reference is a host
    if backing_disk_ids and not isinstance(reference, vim.HostSystem):
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\' when backing disk filter '
            'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # NOTE(review): mutates the caller-supplied list in place
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        # Nothing left to match after the disk filter resolved no datastores
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Build a traversal spec suited to the reference type; the default
    # 'Traverse All' spec does not reach datastores from every container
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    # Apply the name filter client-side
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    raises
        salt.exceptions.VMwareApiError / VMwareRuntimeError when the rename
        call fails.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object from which to obtain the storage system.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host; retrieved from ``host_ref`` when not provided.

    raises
        salt.exceptions.VMwareObjectRetrievalError when no storage system is
        found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo.

    storage_system
        The vim.HostStorageSystem to query.

    device_path
        The device path whose partition info is retrieved.
    '''
    try:
        # RetrieveDiskPartitionInfo takes a list of paths; we query one and
        # return its single result
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec).

    storage_system
        The vim.HostStorageSystem used to compute the partition layout.

    device_path
        The device path of the disk to be partitioned.

    partition_info
        The current vim.HostDiskPartitionInfo of the disk.

    raises
        salt.exceptions.VMwareObjectNotFoundError when the disk has no free
        partition; salt.exceptions.VMwareNotFoundError when the newly computed
        partition cannot be identified.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # Only adding a partition that fills the free space at the end of the
    # disk is supported
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use lazy %-style formatting: logging does not interpolate '{0}'
    # placeholders, so the previous '{0}' form never rendered the value
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore on a SCSI disk, using a single partition that
    fills the disk's free space.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDisk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem; retrieved when not provided.

    return
        The vim.Datastore reference of the created datastore.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a spec that adds a vmfs partition over the disk's free space
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    raises
        salt.exceptions.VMwareObjectRetrievalError when no datastore system is
        found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes (unmounts/destroys) a datastore via the datastore system of one of
    its attached hosts.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    raises
        salt.exceptions.VMwareApiError when the datastore has no attached
        hosts or the removal fails.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # The removal is issued through the first attached host's datastore system
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            # Drop hosts that are not members of the requested cluster
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    raises
        salt.exceptions.VMwareObjectRetrievalError when the storage device,
        multipath, or lun info cannot be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiLun objects on a host (empty list when
    the host reports none).

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    raises
        salt.exceptions.VMwareObjectRetrievalError when the storage system or
        storage device info cannot be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Map every SCSI address on an ESXi host to its vim.ScsiLun object.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # scsi address -> lun key
    key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                        storage_system,
                                                        hostname)
    # lun key -> lun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Join the two maps: scsi address -> lun object
    scsi_addr_to_lun = {}
    for scsi_addr, lun_key in six.iteritems(key_by_scsi_addr):
        scsi_addr_to_lun[scsi_addr] = lun_by_key[lun_key]
    return scsi_addr_to_lun
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Neither filter set and not retrieving everything: nothing to match
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk, as a vim.HostDiskPartitionInfo.

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are retrieved

    storage_system
        The ESXi host's storage system. Default is None.

    raises
        salt.exceptions.VMwareObjectRetrievalError when the host reports no
        devices or the disk is not found.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the requested disk by canonical name among the SCSI luns
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.

    raises
        salt.exceptions.VMwareObjectRetrievalError when the host's devices or
        the disk cannot be found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the target disk by canonical name among the SCSI luns
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their canonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.

    raises
        salt.exceptions.VMwareObjectRetrievalError when the host's VSAN
        config or storage info cannot be retrieved.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # No cache ids and not retrieving everything: nothing to match
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (single) cache disk (ssd)
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verify that a disk group's cache disk and capacity disks match the
    expected canonical names; raises ArgumentValueError when they don't,
    returns True when they do.
    '''
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    # Compare the capacity disks as sorted lists so ordering doesn't matter
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the host cache configuration entry (first element of
    ``cacheConfigurationInfo``) of the specified host, or None when no host
    cache is configured.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Traverse from the host to its cache configuration manager
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first datastore's cache configuration is returned
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in mebibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method

    return
        True on success.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the async configuration task finishes
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Return every HostSystem managed object known to the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    host_type = vim.HostSystem
    return list_objects(service_instance, host_type)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects.

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names to select (ignored when get_all_resource_pools
        is True)

    datacenter_name
        Name of the datacenter where the resource pool is available.
        Default is None (search from the inventory root folder).

    get_all_resource_pools
        Boolean; when True return every resource pool in the container

    return
        List of vim.ResourcePool managed object references

    Raises VMwareObjectRetrievalError when nothing matched.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Scope the search to the datacenter when one is given, otherwise
    # search the whole inventory from the root folder.
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Bug fix: the original formatted the (always empty) result list
        # into the message instead of the names that were requested.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(resource_pool_names,
                                                            get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Return every ResourcePool managed object known to the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    pool_type = vim.ResourcePool
    return list_objects(service_instance, pool_type)
def list_networks(service_instance):
    '''
    Return every Network managed object known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    network_type = vim.Network
    return list_objects(service_instance, network_type)
def list_vms(service_instance):
    '''
    Return every VirtualMachine managed object known to the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vm_type = vim.VirtualMachine
    return list_objects(service_instance, vm_type)
def list_folders(service_instance):
    '''
    Return every Folder managed object known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    folder_type = vim.Folder
    return list_objects(service_instance, folder_type)
def list_dvs(service_instance):
    '''
    Return every DistributedVirtualSwitch managed object known to the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    dvs_type = vim.DistributedVirtualSwitch
    return list_objects(service_instance, dvs_type)
def list_vapps(service_instance):
    '''
    Return every VirtualApp managed object known to the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vapp_type = vim.VirtualApp
    return list_objects(service_instance, vapp_type)
def list_portgroups(service_instance):
    '''
    Return every DistributedVirtualPortgroup managed object known to the
    given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    portgroup_type = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, portgroup_type)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed and returns its result.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.

    Raises the salt-level translation of any vim/vmodl fault encountered
    while polling or reported as the task's error.
    '''
    def _task_info(task):
        # Reading task.info goes over the wire and may raise the same vim
        # faults as the task itself; translate them to salt exceptions in
        # one place instead of duplicating the handlers (twice, originally).
        try:
            return task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    task_info = _task_info(task)
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep to the next whole-second boundary so time_counter tracks
        # elapsed wall-clock seconds.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        task_info = _task_info(task)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault so the
        # handlers below can classify it.
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Look up a single virtual machine by name and return it together with the
    requested properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.
    '''
    if datacenter and not parent_ref:
        # Restrict the search to the named datacenter.
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    all_vms = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    matches = [entry for entry in all_vms if entry['name'] == name]
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    if len(matches) > 1:
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the'
            'same name, please specify a container.']))
    return matches[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object in which to place a virtual machine.

    The folder is determined, in order of preference, from: the parent folder
    of an existing base VM (cloning), an explicit ``folder`` key in the
    placement dict, or the datacenter's default vm folder.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Cloning: place the new VM next to the base VM, i.e. in its parent.
        vm_object = get_vm_by_property(service_instance, base_vm_name,
                                       vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(
            vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(
            service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance,
                                                             datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(
            datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'The datacenter vm folder object is not defined')
    else:
        # Bug fix: previously no branch matched here and the function fell
        # through to ``return folder_object`` with the name unbound,
        # raising UnboundLocalError; raise an explicit error instead.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The folder object could not be determined')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied,
    we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            # Standalone hosts expose their resource pool directly.
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Clustered hosts don't have 'resourcePool'; traverse to the
            # cluster's pool instead.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Bug fix: this branch previously formatted placement['host']
            # into the message — a key that may not exist in this branch
            # (raising KeyError) and names the wrong object anyway.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a dict with the
    size as an integer number of KB.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    Raises ArgumentValueError when the unit is not one of GB/MB/KB
    (case-insensitive).
    '''
    normalized_unit = unit.lower()
    if normalized_unit == 'gb':
        # vCenter needs long value
        target_size = int(size * 1024 * 1024)
    elif normalized_unit == 'mb':
        target_size = int(size * 1024)
    elif normalized_unit == 'kb':
        target_size = int(size)
    else:
        # Bug fix: the unit *was* specified, it just isn't supported;
        # say so and include the offending value.
        raise salt.exceptions.ArgumentValueError(
            'The unit \'{0}\' is not supported'.format(unit))
    return {'size': target_size, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine ('on' or 'off')

    Raises ArgumentValueError for an unsupported action and translates
    vim/vmodl faults into salt exceptions.
    '''
    # Pick the operation first so the (formerly duplicated) try/except
    # translation below exists only once.
    if action == 'on':
        power_op = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_op = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_op()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from a config spec.

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Only pass the host to CreateVM_Task when a real HostSystem was given.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference.

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)
    '''
    # Build the call arguments once; host is only supplied when given.
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Reconfigures a virtual machine with the given config spec and returns
    the task result.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(reconfig_task, name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine (removes it from the inventory and deletes
    its files).

    vm_ref
        Managed object reference of a virtual machine object
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory (the VM's files are
    left on disk, unlike ``delete_vm``).

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Bug fix: the original docstring and trace message said "Destroying",
    # copied from delete_vm; this function only unregisters.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Consistency fix: log the fault before translating it, like every
        # sibling function in this module does.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_dvportgroups
|
python
|
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Datacenter parent: traverse networkFolder -> childEntity.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # DVS parent: portgroups hang directly off the 'portgroup' property.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)

    def _selected(entry):
        return get_all_portgroups or \
            (portgroup_names and entry['name'] in portgroup_names)

    results = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualPortgroup,
                                       container_ref=parent_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    return [entry['object'] for entry in results if _selected(entry)]
|
Returns distributed virtual porgroups (dvportgroups).
The parent object can be either a datacenter or a dvs.
parent_ref
The parent object reference. Can be either a datacenter or a dvs.
portgroup_names
The names of the dvss to return. Default is None.
get_all_portgroups
Return all portgroups in the parent. Default is False.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1212-L1264
|
[
"def get_mors_with_properties(service_instance, object_type, property_list=None,\n container_ref=None, traversal_spec=None,\n local_properties=False):\n '''\n Returns a list containing properties and managed object references for the managed object.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n\n object_type\n The type of content for which to obtain managed object references.\n\n property_list\n An optional list of object properties used to return even more filtered managed object reference results.\n\n container_ref\n An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,\n ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory\n rootFolder.\n\n traversal_spec\n An optional TraversalSpec to be used instead of the standard\n ``Traverse All`` spec\n\n local_properties\n Flag specigying whether the properties to be retrieved are local to the\n container. If that is the case, the traversal spec needs to be None.\n '''\n # Get all the content\n content_args = [service_instance, object_type]\n content_kwargs = {'property_list': property_list,\n 'container_ref': container_ref,\n 'traversal_spec': traversal_spec,\n 'local_properties': local_properties}\n try:\n content = get_content(*content_args, **content_kwargs)\n except BadStatusLine:\n content = get_content(*content_args, **content_kwargs)\n except IOError as exc:\n if exc.errno != errno.EPIPE:\n raise exc\n content = get_content(*content_args, **content_kwargs)\n\n object_list = []\n for obj in content:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n properties['object'] = obj.obj\n object_list.append(properties)\n log.trace('Retrieved %s objects', len(object_list))\n return object_list\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n",
"def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):\n '''\n Retrieves the service instance from a managed object.\n\n me_ref\n Reference to a managed object (of type vim.ManagedEntity).\n\n name\n Name of managed object. This field is optional.\n '''\n if not name:\n name = mo_ref.name\n log.trace('[%s] Retrieving service instance from managed object', name)\n si = vim.ServiceInstance('ServiceInstance')\n si._stub = mo_ref._stub\n return si\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port (defaults to 443)
    :param protocol: Connection protocol (defaults to https)
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: Dictionary as produced by cmd.run_all (retcode/stdout/stderr),
             or False when the esxcli binary is not on PATH
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    # NOTE(review): the password is interpolated into the command line below
    # and is therefore visible in the process list while esxcli runs;
    # output_loglevel='quiet' keeps it out of the logs, but using a
    # credstore is the safer option.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Going through vCenter: -h selects the target ESXi host.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Two login mechanisms are supported:
      - ``userpass``: plain username/password (both mandatory)
      - ``sspi``: Kerberos/GSSAPI, using a base64 token derived from
        ``principal`` and ``domain`` (both mandatory)

    Raises CommandExecutionError for missing/unsupported mechanism
    parameters and VMwareConnectionError when the connection fails.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # NOTE(review): exc.message is a Python 2 attribute; on Python 3
        # this line itself raises AttributeError — confirm intended support.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)

        try:
            # First fallback: if the failure was SSL certificate
            # verification, retry once with verification disabled.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            # Second fallback: some SSL stacks phrase the error differently;
            # retry once more with an explicit TLSv1 context and CERT_NONE.
            if 'certificate verify failed' in six.text_type(exc):
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is cleanly closed when the interpreter exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Look the spec up by name through the server's customization spec manager.
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the first matching managed object reference, or None if no
    object of the given type has the given name.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so that
        # repeated lookups don't accumulate views on the vCenter/ESXi host
        # (get_content in this module does the same cleanup).
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``

    Raises salt.exceptions.VMwareApiError / VMwareRuntimeError if the
    connection check fails with a vim/vmodl fault.
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Reuse pyVmomi's module-level cached service instance when possible;
    # only invalidate it for proxy minions or when the cached stub points
    # at a different host:port than the one requested.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and establish a fresh connection.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    stub = service_instance._stub
    # Reuse the existing authenticated session: carry over the host and the
    # session cookie from the current stub so the new stub doesn't need to
    # re-login.
    hostname = stub.host.split(':')[0]
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only for logging. This field is optional.
    '''
    if not name:
        # Only reached when the caller explicitly passes a falsy name;
        # the default '<unnamed>' is truthy.
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    si = vim.ServiceInstance('ServiceInstance')
    # Share the managed object's SOAP stub so the returned service instance
    # uses the same authenticated session.
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    vim faults are translated into salt.exceptions.VMwareApiError and
    vmodl runtime faults into salt.exceptions.VMwareRuntimeError.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises VMwareApiError if the endpoint reports an apiType other than
    'VirtualCenter' or 'HostAgent'.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' means a vCenter Server; 'HostAgent' means an ESXi host.
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Returns the endpoint's vim.AboutInfo object (version, build, apiType,
    etc.); vim/vmodl faults are translated into salt exceptions.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None if no DVS matches
    '''
    # Bail out early if no DVS with that name exists at all.
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    for candidate in container.view:
        if candidate.name == dvs_name:
            return candidate
    return None
def _get_pnics(host_reference):
    '''
    Helper function that returns a list of PhysicalNics and their information.

    host_reference
        A vim.HostSystem reference whose physical NICs are returned.
    '''
    return host_reference.config.network.pnic
def _get_vnics(host_reference):
    '''
    Helper function that returns a list of VirtualNics and their information.

    host_reference
        A vim.HostSystem reference whose virtual NICs are returned.
    '''
    return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
    '''
    Helper function that returns a list of Virtual NicManagers
    and their information.

    host_reference
        A vim.HostSystem reference whose virtualNicManager is returned.
    '''
    return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Returns the base64-encoded token produced by the GSSAPI handshake step;
    raises CommandExecutionError if no token could be obtained, and
    ImportError if the gssapi library is unavailable.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # Drive the handshake; a produced token is returned immediately.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): in_token is never reassigned inside this loop, so a
        # step that yields no token and leaves the context unestablished
        # raises here on the first pass -- effectively only a single
        # handshake step is supported.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    Returns an empty dict when the endpoint is not an ESXi host (HostAgent).

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only ESXi hosts (apiType 'HostAgent') expose per-host hardware data.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # A HostAgent connection sees exactly one host: view.view[0].
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grains report MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Collect addresses per virtual NIC (vmk interfaces).
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host + '.' + domain, with the dot omitted when no domain
            # is configured.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NICs contribute only MAC addresses.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory
        (returns the instance's ServiceContent).
    '''
    return service_instance.RetrieveContent()
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    vim faults are translated into salt.exceptions.VMwareApiError and
    vmodl runtime faults into salt.exceptions.VMwareRuntimeError.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view (only created when we built the default
    # container-view traversal above) so it doesn't leak on the server.
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Scan every managed object of the requested type; match either on the
    # requested property or on the stringified MOR id.
    candidates = get_mors_with_properties(service_instance, object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for entry in candidates:
        mor_id = six.text_type(entry.get('object', '')).strip('\'"')
        if entry[property_name] == property_value or property_value == mor_id:
            return entry['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    def _retrieve():
        # Single place for the actual content retrieval so the retry paths
        # below stay in sync.
        return get_content(service_instance, object_type,
                           property_list=property_list,
                           container_ref=container_ref,
                           traversal_spec=traversal_spec,
                           local_properties=local_properties)

    # Retry exactly once on a dropped HTTP connection (stale keep-alive).
    try:
        content = _retrieve()
    except BadStatusLine:
        content = _retrieve()
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = _retrieve()

    object_list = []
    for obj in content:
        entry = dict((prop.name, prop.val) for prop in obj.propSet)
        entry['object'] = obj.obj
        object_list.append(entry)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimally.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises VMwareApiError if the properties could not be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First fetch the object's name purely for logging/error messages;
    # fall back to '<unnamed>' if 'name' is not a valid property.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.
    If the name wasn't found, it returns None.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new network adapter device object for the given adapter type
    name.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e'.

    Raises ValueError for unrecognized adapter type names.
    '''
    factories = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type not in factories:
        raise ValueError('An unknown network adapter object type name.')
    return factories[adapter_type]()
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name for a network adapter device
    object.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ValueError for unrecognized adapter object types.
    '''
    # Check order is significant and mirrors the original isinstance chain:
    # the vmxnet2/vmxnet3 variants are tested before plain vmxnet, and
    # e1000e before e1000.
    type_map = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for device_cls, type_name in type_map:
        if isinstance(adapter_object, device_cls):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Reach the DVSs by traversing datacenter -> networkFolder -> childEntity.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    results = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            results.append(entry['object'])
    return results
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter

    dc_ref
        The datacenter reference whose network folder is returned.

    Raises VMwareObjectRetrievalError if the folder cannot be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete. Note: despite the task producing the new
    DVS, this function returns None.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the
        name is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and waits for
    the reconfiguration task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (network I/O control) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs)

    dvs_ref
        The dvs reference

    Raises VMwareObjectRetrievalError if no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        # Uplink portgroups carry the system tag 'SYSTEM/DVS.UPLINKPG'.
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    # Reach the networks via datacenter -> networkFolder -> childEntity.
    service_instance = get_service_instance_from_managed_object(parent_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    results = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            results.append(entry['object'])
    return results
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return even more
        filtered managed object reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    return [entry['name'] for entry in
            get_mors_with_properties(service_instance, vim_object, properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager of a service instance.

    service_instance
        The Service Instance Object from which to obtain the license
        manager.

    Raises a VMwareApiError on permission/API faults and a
    VMwareRuntimeError on runtime faults.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager of a service instance.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises a VMwareApiError on permission/API faults, a
    VMwareRuntimeError on runtime faults, and a
    VMwareObjectRetrievalError if the manager could not be retrieved.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Raises a VMwareApiError on permission/API faults and a
    VMwareRuntimeError on runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Returns the license info object returned by ``AddLicense``.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    label = vim.KeyValue()
    # 'VpxClientLicenseLabel' is presumably the label key the vSphere client
    # uses for display — inferred from the key name; TODO confirm
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises an ArgumentValueError if no entity_name is passed, a
    VMwareApiError/VMwareRuntimeError on API faults, and a
    VMwareObjectRetrievalError on inconsistent assignment results.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        # The vCenter itself is identified by its instance UUID
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        # Other entities (e.g. clusters, hosts) are identified by their moid
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # A vCenter (uuid lookup) is expected to have exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')

    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Returns the value returned by ``UpdateAssignedLicense``.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # vcenter: identified by its instance UUID
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before re-raising, consistent with every other fault
            # handler in this module (was missing here).
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns the vim.Datacenter managed object with a given name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name

    Raises a VMwareObjectRetrievalError if no datacenter with that name
    exists.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name

    Returns the created vim.Datacenter object.

    Raises a VMwareApiError on permission/API faults and a
    VMwareRuntimeError on runtime faults.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The name of the cluster to be retrieved

    Raises a VMwareObjectRetrievalError if the cluster is not found in the
    datacenter.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Traverse datacenter -> hostFolder -> children
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_spec])
    matches = [entry['object'] for entry in
               get_mors_with_properties(si,
                                        vim.ClusterComputeResource,
                                        container_ref=dc_ref,
                                        property_list=['name'],
                                        traversal_spec=traversal_spec)
               if entry['name'] == cluster]
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx). Required.

    Raises a VMwareApiError on permission/API faults and a
    VMwareRuntimeError on runtime faults.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        # NOTE(review): the cluster reference returned by CreateClusterEx
        # is discarded; callers retrieve the cluster separately if needed.
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster's configuration and waits for the reconfiguration
    task to complete.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx) with the changes to
        apply. Required.

    Raises a VMwareApiError on permission/API faults and a
    VMwareRuntimeError on runtime faults.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True applies the spec incrementally instead of replacing
        # the whole configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster (storage pod) names associated with
    a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastore names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dictionary of datastores associated with a given service
    instance, keyed by datastore name. Each entry contains the basic
    information about the datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {name: list_datastore_full(service_instance, name)
            for name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    ``capacity``, ``free`` and ``used`` are expressed in MiB; ``usage`` is
    a percentage.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises a VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against a zero-capacity (e.g. inaccessible) datastore to avoid
    # a ZeroDivisionError when computing the usage percentage.
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies as a moref like "'vim.HostSystem:host-123'";
        # strip the quotes and take the moid after the colon.
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the managed object reference, or None if no object with that
    name is found.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Destroy the server-side view; the original implementation leaked
        # one view object per call.
        container.DestroyView()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object

    Returns the managed object reference, or None if no object with that
    moid is found.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Destroy the server-side view; the original implementation leaked
        # one view object per call.
        container.DestroyView()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Names of the datastores to search.

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            # Call the module-level helper directly, consistent with the
            # rest of this module (previously used the self-referential
            # 'salt.utils.vmware.wait_for_task' name).
            files.append(wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore with no matching files is simply skipped
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible. Supported
        types: vim.HostSystem, vim.ClusterComputeResource, vim.Datacenter,
        vim.StoragePod, or the root folder (vim.Folder named 'Datacenters').

    datastore_names
        The list of datastore names to be retrieved. Default value is None.
        NOTE(review): when ``backing_disk_ids`` is also given, this list is
        extended in place with the matching datastore names.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all datastores visible from the
        reference. Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
    if backing_disk_ids and not isinstance(reference, vim.HostSystem):
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\' when backing disk filter '
            'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # NOTE(review): mutates the caller-supplied list in place
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores

    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []

    log.trace('datastore_names = %s', datastore_names)

    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    current_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", current_name,
              new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {}'.format(
                exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system.

    service_instance
        The Service Instance Object from which to obtain the storage system.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host; retrieved from ``host_ref`` if not provided.

    Raises a VMwareObjectRetrievalError if the storage system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return results[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The host storage system used to query the partition info.

    device_path
        Path of the device whose partition info is retrieved.

    Raises a VMwareApiError on permission/API faults and a
    VMwareRuntimeError on runtime faults.
    '''
    try:
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A single device path was queried, so only the first entry is relevant
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The host storage system used to compute the partition info.

    device_path
        Path of the device on which the partition is created.

    partition_info
        The current vim.HostDiskPartitionInfo of the device.

    Raises a VMwareObjectNotFoundError if the disk has no free partition,
    a VMwareNotFoundError if the new partition cannot be identified in the
    computed layout, and VMwareApiError/VMwareRuntimeError on API faults.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed: the previous '{0}' placeholder is never substituted by the
    # logging framework's %-style lazy formatting.
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's storage system. Retrieved from the host if not provided.

    Returns the created datastore reference.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    # Compute the spec of a new partition consuming the free space at the
    # end of the disk
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system.

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises a VMwareObjectRetrievalError if the datastore system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return results[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. (The previous docstring — "Creates a VMFS
    datastore from a disk_id" — was a copy-paste error.)

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises a VMwareApiError if the datastore has no attached hosts or on
    permission/API faults, and a VMwareRuntimeError on runtime faults.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # The removal is performed through the datastore system of the first
    # attached host
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.

    Raises an ArgumentValueError if a cluster is given without a
    datacenter.
    '''
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    properties = ['name']
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # 'parent' is needed to check each host's cluster membership
            properties.append('parent')
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for host in hosts:
        if cluster_name:
            # Keep only hosts whose parent is the requested cluster
            parent = host['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host['name'] in host_names:
            filtered_hosts.append(host['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        # Accessing the property can trigger a server round-trip and raise
        # vim/vmodl faults, hence the handling below
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        # Retrieve the storage system when the caller did not provide one
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        # Property access may perform a server round-trip and raise faults
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    # A host with no luns is not an error; return an empty list
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref,
                                                                name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # scsi address -> lun key, as reported by the host's multipath info
    key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        service_instance, host_ref, storage_system, hostname)
    # lun key -> vim.ScsiLun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Join the two maps: scsi address -> vim.ScsiLun object
    scsi_addr_to_lun = {}
    for scsi_addr, lun_key in six.iteritems(key_by_scsi_addr):
        scsi_addr_to_lun[scsi_addr] = lun_by_key[lun_key]
    return scsi_addr_to_lun
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # No filter requested and not retrieving everything: nothing to do
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    # Keep only actual disks (not other lun types) that satisfy any of the
    # requested filters
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the disk by its canonical name among the host's scsi luns
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # The storage system is queried by device path, not canonical name
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk

    in a vcenter filtered by their names and/or datacenter, cluster membership

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    # Traverse from the host to its storage system to fetch the scsi lun list
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # No filter requested and not retrieving everything: nothing to do
            return []
    try:
        # Property access may perform a server round-trip and raise faults
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        # A host with no disk groups is not an error
        return []
    # A disk group is identified by its (single) cache disk's canonical name
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    # The cache disk of the disk group must be exactly the expected one
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # The capacity disks must match as a set; compare sorted lists so the
    # original ordering does not matter
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids,
                      expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, otherwise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # No cache manager given: traverse from the host to its
        # configManager.cacheConfigurationManager property
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first datastore's cache config is returned (see TODO above)
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the configuration task as finished
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Lists every ESXi host visible through the given service instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; when True all resource pools in the container are returned

    return
        List of vim.ResourcePool managed object references

    raises
        VMwareObjectRetrievalError if no matching resource pool is found
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search under the datacenter when given, otherwise under the root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Fixed: report the requested names; the previous message formatted
        # the (always empty) result list, making the error useless
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={0} get_all={1} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Lists every resource pool visible through the given service instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Lists every network visible through the given service instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Lists every virtual machine visible through the given service instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Lists every folder visible through the given service instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Lists every distributed virtual switch visible through the given service
    instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Lists every vApp visible through the given service instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Lists every distributed virtual portgroup visible through the given
    service instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll until the task leaves the 'running'/'queued' states
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only log every sleep_seconds iterations to avoid flooding the log
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep to the next whole-second boundary relative to start_time
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state
        try:
            # Re-raise the stored fault so it can be dispatched on its
            # concrete type by the handlers below
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.
    '''
    if datacenter and not parent_ref:
        # Restrict the search to the given datacenter
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set covering config, storage, network and runtime
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # VM names are not unique across containers; the caller must narrow
        # the search with datacenter/parent_ref
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the'
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    raises
        ArgumentValueError if neither a base VM, a folder placement nor a
        datacenter was supplied
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Clone case: place the new VM in the base VM's parent folder
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Fixed: previously no branch matched (no base VM, no 'folder'
        # placement, falsy datacenter) and the return below raised a
        # NameError on the undefined folder_object
        raise salt.exceptions.ArgumentValueError(
            'Unable to retrieve a folder: no base VM, folder placement or '
            'datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object to access vCenter

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    # Fixed: normalize a missing placement to an empty dict so the membership
    # tests below fall through to the explicit 'Placement is not defined.'
    # error instead of raising TypeError ('in' on None)
    placement = placement or {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Hosts inside a cluster do not expose 'resourcePool' directly;
            # traverse the parent compute resource to find it
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: report the ambiguous resource pool; the previous message
            # referenced placement['host'], which may not exist in this branch
            # and would raise a KeyError while building the error
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {0}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a long integer.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    # Multiplier from each supported unit to kibibytes
    kb_multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    multiplier = kb_multipliers.get(unit.lower())
    if multiplier is None:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    # vCenter needs long value
    return {'size': int(size * multiplier), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    # Resolve the requested operation first so the vim fault handling below
    # is shared between both power actions
    if action == 'on':
        power_op = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_op = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_op()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    try:
        # Only pass the host when a valid vim.HostSystem was supplied;
        # otherwise let vCenter pick the host within the resource pool
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file.
    On success it returns the vim.VirtualMachine managed object reference.

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object
        (optional)

    raises
        salt.exceptions.VMwareApiError / VMwareRuntimeError on API failure,
        salt.exceptions.VMwareVmRegisterError if the vmx file is missing
    '''
    try:
        # asTemplate=False: register as a runnable VM, not as a template.
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        # Wait for registration to complete; a missing vmx file surfaces as
        # a FileNotFound error from the task.
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates (reconfigures) the virtual machine with the given config spec
    and returns the virtual machine managed object reference.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to apply (vim.vm.ConfigSpec)

    raises
        salt.exceptions.VMwareApiError / VMwareRuntimeError on API failure
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfigure task completes (raises on task failure).
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine (vSphere ``Destroy_Task``). Use
    ``unregister_vm`` instead to remove a VM from the inventory without
    destroying it.

    vm_ref
        Managed object reference of a virtual machine object

    raises
        salt.exceptions.VMwareApiError / VMwareRuntimeError on API failure
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Wait for the destroy task to finish before returning.
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory without destroying
    it (the VM's files remain on the datastore; compare ``delete_vm``).

    vm_ref
        Managed object reference of a virtual machine object

    raises
        salt.exceptions.VMwareApiError / VMwareRuntimeError on API failure
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed copy-paste: this log line previously claimed the VM was being
    # destroyed, which is the delete_vm operation, not this one.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # log.exception added for consistency with every other fault
        # handler in this module.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_uplink_dvportgroup
|
python
|
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference

    raises
        salt.exceptions.VMwareObjectRetrievalError if the switch has no
        portgroup tagged as the uplink portgroup
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    # Traverse from the DVS to its portgroups via the 'portgroup' property.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by the 'SYSTEM/DVS.UPLINKPG' tag.
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    # Return the first (normally only) tagged uplink portgroup.
    return items[0]
|
Returns the uplink distributed virtual portgroup of a distributed virtual
switch (dvs)
dvs_ref
The dvs reference
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1267-L1293
|
[
"def get_mors_with_properties(service_instance, object_type, property_list=None,\n container_ref=None, traversal_spec=None,\n local_properties=False):\n '''\n Returns a list containing properties and managed object references for the managed object.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n\n object_type\n The type of content for which to obtain managed object references.\n\n property_list\n An optional list of object properties used to return even more filtered managed object reference results.\n\n container_ref\n An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,\n ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory\n rootFolder.\n\n traversal_spec\n An optional TraversalSpec to be used instead of the standard\n ``Traverse All`` spec\n\n local_properties\n Flag specigying whether the properties to be retrieved are local to the\n container. If that is the case, the traversal spec needs to be None.\n '''\n # Get all the content\n content_args = [service_instance, object_type]\n content_kwargs = {'property_list': property_list,\n 'container_ref': container_ref,\n 'traversal_spec': traversal_spec,\n 'local_properties': local_properties}\n try:\n content = get_content(*content_args, **content_kwargs)\n except BadStatusLine:\n content = get_content(*content_args, **content_kwargs)\n except IOError as exc:\n if exc.errno != errno.EPIPE:\n raise exc\n content = get_content(*content_args, **content_kwargs)\n\n object_list = []\n for obj in content:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n properties['object'] = obj.obj\n object_list.append(properties)\n log.trace('Retrieved %s objects', len(object_list))\n return object_list\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n",
"def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):\n '''\n Retrieves the service instance from a managed object.\n\n me_ref\n Reference to a managed object (of type vim.ManagedEntity).\n\n name\n Name of managed object. This field is optional.\n '''\n if not name:\n name = mo_ref.name\n log.trace('[%s] Retrieving service instance from managed object', name)\n si = vim.ServiceInstance('ServiceInstance')\n si._stub = mo_ref._stub\n return si\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary (``cmd.run_all`` result), or False when the
             esxcli binary is not installed

    NOTE(review): the password is interpolated into the command line, so
    it can be visible in the process list; ``output_loglevel='quiet'``
    below keeps it out of the logs at least.
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting through a vCenter: '-h' selects the target ESXi host.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        Host to connect to.

    username / password
        Credentials; both are mandatory when mechanism is ``userpass``.

    protocol / port
        Connection protocol and TCP port.

    mechanism
        Either ``userpass`` or ``sspi`` (Kerberos via a gssapi token).

    principal / domain
        Kerberos service principal and user domain; both are mandatory
        when mechanism is ``sspi``.

    raises
        salt.exceptions.CommandExecutionError for missing/unsupported
        parameters, salt.exceptions.VMwareConnectionError when the
        connection itself fails.
    '''
    log.trace('Retrieving new service instance')
    token = None
    # Validate parameters per mechanism before attempting the connection.
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Older pyVmomi releases don't accept b64token/mechanism kwargs.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First fallback: retry with an unverified SSL context when the
            # failure was a certificate verification error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            # Second fallback: explicit TLSv1 context with verification off.
            if 'certificate verify failed' in six.text_type(exc):
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is cleanly disconnected at interpreter exit.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.
    Returns the first matching object, or None when nothing matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    # Recursive (True) view over the entire inventory under rootFolder.
    # NOTE(review): the container view is never Destroy()-ed, so each call
    # leaves a view behind on the server side -- consider cleaning it up.
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    for item in container.view:
        if item.name == obj_name:
            return item
    return None
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the
    service instance object. Reuses pyVim's cached session when it is
    still valid for the same host, reconnecting otherwise.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``

    raises
        salt.exceptions.VMwareApiError / VMwareRuntimeError when the
        connection liveness check fails with an API/runtime fault
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # Try the module-level cached connection first.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and establish a fresh connection.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a SOAP stub that points to a different path,
    created from an existing connection (reusing its session cookie).

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    # stub.host is 'hostname:port'; keep only the hostname part.
    hostname = stub.host.split(':')[0]
    # Extract the session cookie value from the quoted Set-Cookie string so
    # the new stub can piggy-back on the authenticated session.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only for logging. This field is
        optional.

    NOTE(review): since the default is the truthy string '<unnamed>', the
    fallback below only fires when a caller explicitly passes a falsy
    ``name`` (None or '') -- confirm that is the intended behavior.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a fresh ServiceInstance sharing the managed object's SOAP stub
    # (and therefore its authenticated session).
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.

    raises
        salt.exceptions.VMwareApiError / VMwareRuntimeError on API failure
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server
    ('VirtualCenter' api type) and False if the connection is made to an
    ESXi host ('HostAgent' api type).

    service_instance
        The Service Instance from which to obtain managed object references.

    raises
        salt.exceptions.VMwareApiError for API faults or an unknown api
        type, salt.exceptions.VMwareRuntimeError for runtime faults
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns the 'about' information of the vCenter or ESXi host
    (``content.about``).

    service_instance
        The Service Instance from which to obtain managed object references.

    raises
        salt.exceptions.VMwareApiError / VMwareRuntimeError on API failure
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no switch with that name
             exists
    '''
    # Only search the inventory when the switch is known to exist, and
    # delegate the container-view lookup to the shared helper instead of
    # duplicating it here (the original body was a verbatim copy of
    # get_mor_using_container_view).
    if dvs_name in list_dvs(service_instance):
        return get_mor_using_container_view(service_instance,
                                            vim.DistributedVirtualSwitch,
                                            dvs_name)
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection, base64-encoded.

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    raises
        ImportError when gssapi is not available,
        salt.exceptions.CommandExecutionError when no token is produced
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    # NOTE(review): in_token is never reassigned inside the loop, so it
    # stays None; consequently the 'if not in_token' branch below raises on
    # the first iteration whenever step() returns no token and the context
    # is not yet established -- confirm whether multi-step negotiation was
    # ever intended here.
    in_token = None
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance
    is a HostAgent type (i.e. a direct ESXi connection); returns an empty
    dict otherwise.

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        # Recursive container view over all HostSystem objects; for a direct
        # ESXi connection the first view entry is the host itself.
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; convert to MiB for the grain.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            # Collect per-vnic IP/MAC info into the standard interface grains.
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn = host + '.' + domain, omitting the dot when no domain set.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the local reference to the view.
        # NOTE(review): this rebinds the name but does not Destroy() the
        # server-side container view -- confirm whether cleanup is needed.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (retrieved service content) of a Service Instance
    Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter (``RetrieveContent().rootFolder``).

    service_instance
        The Service Instance Object for which to obtain the root folder.

    raises
        salt.exceptions.VMwareApiError / VMwareRuntimeError on API failure
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.

    raises
        salt.exceptions.VMwareApiError / VMwareRuntimeError on API failure
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    # (all=True when no explicit property_list was given)
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    # (skip the starting object itself unless collecting local properties)
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view (only the one we created ourselves above)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference whose ``property_name``
    equals ``property_value`` (the object's moid is also accepted as a
    match), or None if nothing matches.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value to match against (property value or moid).

    property_name
        The property compared against ``property_value``. Defaults to ``name``.

    container_ref
        Optional managed object to search under; defaults to the inventory
        rootFolder when omitted.
    '''
    # Retrieve every object of the requested type together with the one
    # property needed for matching.
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # The stringified MOR looks like 'vim.X:moid'; strip the quotes so
        # callers may also match on the raw moid representation.
        moid = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == moid:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list of dicts, one per retrieved managed object. Each dict
    maps the requested property names to their values and additionally
    carries the managed object reference under the ``object`` key.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties to filter the results by.

    container_ref
        An optional managed object to search under (Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem). Defaults to the
        inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Whether the properties to retrieve are local to the container; if
        True, no traversal spec may be supplied.
    '''
    retrieve_args = [service_instance, object_type]
    retrieve_kwargs = {'property_list': property_list,
                       'container_ref': container_ref,
                       'traversal_spec': traversal_spec,
                       'local_properties': local_properties}
    # Transient connection hiccups (stale keep-alive sockets) surface as
    # BadStatusLine or EPIPE; retry the retrieval once in those cases.
    try:
        content = get_content(*retrieve_args, **retrieve_kwargs)
    except BadStatusLine:
        content = get_content(*retrieve_args, **retrieve_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*retrieve_args, **retrieve_kwargs)

    results = []
    for retrieved in content:
        entry = {prop.name: prop.val for prop in retrieved.propSet}
        entry['object'] = retrieved.obj
        results.append(entry)
    log.trace('Retrieved %s objects', len(results))
    return results
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way (a single local-property collector call).

    mo_ref
        The managed object reference.

    properties
        List of property names of the managed object to retrieve.

    Raises ``VMwareApiError`` if no properties could be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # Fetch the object's name first so error messages can identify it;
    # fall back to a placeholder if the type has no 'name' property.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    # Only one object is expected since the collector was scoped to mo_ref.
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object, or None if the name property
    was not found.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Returns a new virtual network adapter device object for the given
    adapter type name.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e'.

    Raises ``ValueError`` for any other value.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the adapter type name for a virtual network adapter device
    object.

    adapter_object
        The adapter device object from which to derive the type name.

    Raises ``ValueError`` for unrecognized device objects.
    '''
    # Order matters: more specific classes are checked before their base
    # classes (e.g. Vmxnet2/Vmxnet3 before Vmxnet, E1000e before E1000).
    type_table = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for adapter_class, type_name in type_table:
        if isinstance(adapter_object, adapter_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # DVSs live inside the datacenter's network folder; traverse
    # networkFolder -> childEntity to reach them.
    folder_children_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_children_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises ``VMwareObjectRetrievalError`` if the folder cannot be found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # One-hop traversal: the datacenter's networkFolder property itself.
    folder_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=folder_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits
    for the creation task to finish.

    NOTE(review): despite earlier documentation claiming a return value,
    this function returns None; the result of ``wait_for_task`` is
    discarded — confirm whether callers expect the new DVS reference.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        A minimal spec carrying only the name is built when omitted.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Build a minimal create spec if none (or an incomplete one) was given;
    # the DVS name is always forced into the config spec.
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Reconfigures a distributed virtual switch with the given config spec
    and waits for the reconfiguration task to finish.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    # Translate pyVmomi faults into salt exceptions.
    try:
        reconfig_task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(reconfig_task, dvs_name, six.text_type(reconfig_task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enables or disables network I/O control (NIOC) on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    # Translate pyVmomi faults into salt exceptions.
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).

    parent_ref
        The parent object reference. Can be either a datacenter or a
        distributed virtual switch.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups found under the parent. Default is False.

    Raises ``ArgumentValueError`` if the parent is of an unsupported type.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    # The traversal path depends on the parent type: a datacenter's
    # portgroups live under networkFolder -> childEntity, while a DVS
    # exposes them directly via its 'portgroup' property.
    if isinstance(parent_ref, vim.Datacenter):
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual
    switch (dvs) and waits for the creation task to finish.

    dvs_ref
        The dvs reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec).
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    # Translate pyVmomi faults into salt exceptions.
    try:
        create_task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(create_task, dvs_name, six.text_type(create_task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Reconfigures a distributed virtual portgroup and waits for the
    reconfiguration task to finish.

    portgroup_ref
        The portgroup reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec).
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log message typo: 'portgrouo' -> 'portgroup'.
    log.trace('Updating portgroup %s', pg_name)
    # Translate pyVmomi faults into salt exceptions.
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Destroys a distributed virtual portgroup and waits for the destroy
    task to finish.

    portgroup_ref
        The portgroup reference.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    # Translate pyVmomi faults into salt exceptions.
    try:
        destroy_task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, pg_name, six.text_type(destroy_task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches found under a datacenter.

    parent_ref
        The parent object reference. Must be a datacenter object.

    network_names
        The names of the standard switch networks to return. Default is
        None.

    get_all_networks
        Boolean indicating whether to return all networks in the parent.
        Default is False.

    Raises ``ArgumentValueError`` if the parent is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks live in the datacenter's network folder; traverse
    # networkFolder -> childEntity to reach them.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Only the 'name' value is surfaced to the caller.
    return [entry['name'] for entry in
            get_mors_with_properties(service_instance, vim_object, properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager of a vCenter/ESXi service instance.

    service_instance
        The Service Instance Object from which to obtain the license
        manager.
    '''
    log.debug('Retrieving license manager')
    # Translate pyVmomi faults into salt exceptions.
    try:
        manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager of a service instance.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises ``VMwareObjectRetrievalError`` if the manager is empty.
    '''
    log.debug('Retrieving license assignment manager')
    # Translate pyVmomi faults into salt exceptions.
    try:
        assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses installed on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. Retrieved on
        demand when not provided.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    # Translate pyVmomi faults into salt exceptions.
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license to the license manager and returns it.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add; stored as the
        VpxClientLicenseLabel label.

    license_manager
        The License Manager object of the service instance. Retrieved on
        demand when not provided.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached via the client-visible license label.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    # Translate pyVmomi faults into salt exceptions.
    try:
        added_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return added_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity_ref is not
    provided the entity is assumed to be the vCenter itself, identified by
    its instance UUID; in that case the retrieved assignment's display
    name is validated against entity_name.

    NOTE(review): entity_name is required even when entity_ref is given —
    it is used for logging; confirm this matches callers' expectations.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging (and for vCenter validation).
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid) query must yield exactly one assignment.
    if entity_type == 'uuid' and len(assignments) > 1:
        # Fixed log message typo: 'Unexpectectedly' -> 'Unexpectedly'.
        log.trace('Unexpectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to assign.

    license_name
        The description of the license to assign.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Fixed: log the exception before re-raising, consistent with
            # every other fault handler in this module.
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            # Fixed: log the exception before re-raising (see above).
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names for a given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns datacenter references in a vCenter, filtered by name.

    service_instance
        The Service Instance Object from which to obtain datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object matching the given name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name.

    Raises ``VMwareObjectRetrievalError`` when no datacenter matches.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder and returns its
    reference.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object.

    datacenter_name
        The datacenter name.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    # Translate pyVmomi faults into salt exceptions.
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns the reference of a named cluster in a datacenter.

    dc_ref
        The datacenter reference.

    cluster
        The name of the cluster to be retrieved.

    Raises ``VMwareObjectRetrievalError`` when the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live in the datacenter's host folder; traverse
    # hostFolder -> childEntity to reach them.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter's host folder.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    # Translate pyVmomi faults into salt exceptions.
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Reconfigures a cluster (incremental modify) and waits for the task to
    finish.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    # Translate pyVmomi faults into salt exceptions.
    try:
        reconfig_task = cluster_ref.ReconfigureComputeResource_Task(
            cluster_spec, modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(reconfig_task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names for a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster (storage pod) names for a given
    service instance.

    service_instance
        The Service Instance Object from which to obtain datastore
        clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastore names for a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dict keyed by datastore name with basic information about
    each datastore: name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {name: list_datastore_full(service_instance, name)
            for name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given
    datastore: name, type, url, capacity, free, used, usage, hosts.
    Capacity/free/used are converted from bytes to mebibytes; usage is a
    percentage of capacity.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises ``VMwareObjectRetrievalError`` if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    # Summary values are stringified and stripped of quotes for display.
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # NOTE(review): raises ZeroDivisionError if capacity is 0 — confirm
    # whether an empty/unavailable datastore can report zero capacity.
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies as 'vim.HostSystem:<moid>'; extract the moid
        # to resolve the host object and record its name.
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get a reference to the first object of the specified type whose name
    matches, or None if no object matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((entity for entity in container.view
                 if entity.name == obj_name), None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get a reference to the first object of the specified type whose moid
    matches, or None if no object matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((entity for entity in container.view
                 if entity._moId == obj_moid), None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    results = []
    ds_refs = get_datastores(service_instance, container_object,
                             datastore_names=datastores)
    for ds_ref in ds_refs:
        search_path = '[{}] {}'.format(ds_ref.name, directory)
        try:
            task = ds_ref.browser.SearchDatastore_Task(
                datastorePath=search_path,
                searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            results.append(salt.utils.vmware.wait_for_task(
                task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A missing directory on one datastore is not an error;
            # just skip that datastore
            pass
    return results
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        # The backing disk filter only makes sense for a host reference:
        # only HostSystem exposes the storage system we query below
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # NOTE(review): extend() mutates the caller's datastore_names list
        # in place -- confirm callers don't rely on it staying unchanged
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    current_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'",
              current_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used to query the storage system.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host; retrieved from host_ref when not given.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return entries[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns the partition information (vim.HostDiskPartitionInfo) for a
    single device path.
    '''
    try:
        infos = storage_system.RetrieveDiskPartitionInfo(
            devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', infos[0])
    return infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem of the host owning the disk.

    device_path
        The device path of the disk to be partitioned.

    partition_info
        The existing vim.HostDiskPartitionInfo of the disk.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Bug fix: log.trace uses lazy %-style substitution; the previous '{0}'
    # placeholder was emitted literally and the value was never interpolated
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem; retrieved from host_ref when not
        provided. This argument is optional.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Add a VMFS partition in the disk's remaining free space and get back
    # the new partition number plus the full recomputed partition spec
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return entries[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes (unmounts) a datastore via the datastore system of one of its
    attached hosts.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host can perform the removal; use the first one
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    properties = ['name']
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Parent is needed to check cluster membership; retrieving it
            # also implicitly tests that the cluster exists
            properties.append('parent')
    else:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    selected = []
    for host_props in hosts:
        if cluster_name:
            # Keep only hosts that belong to the requested cluster
            parent = host_props['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host_props['name'] in host_names:
            selected.append(host_props['object'])
    return selected
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for lun in multipath_info.lun:
        for path in lun.path:
            # The vmware scsi_address may have multiple comma separated
            # values; the first one is the actual scsi address
            lun_key_by_scsi_addr[path.name.split(',')[0]] = lun.lun
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    luns = device_info.scsiLun
    if not luns:
        log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
        return []
    log.trace('Retrieved scsi luns in host \'%s\': %s',
              hostname, [lun.canonicalName for lun in luns])
    return luns
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        si, host_ref, storage_system, hostname)
    lun_by_key = {lun.key: lun
                  for lun in get_all_luns(host_ref, storage_system, hostname)}
    return {scsi_addr: lun_by_key[lun_key]
            for scsi_addr, lun_key in six.iteritems(lun_key_by_scsi_addr)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # Nothing to filter on, so nothing to return
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # Convert the scsi addresses into lun (disk) keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
            si, host_ref, storage_system, hostname)
        disk_keys = [lun_key for scsi_addr, lun_key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    matching_disks = []
    for lun in get_all_luns(host_ref, storage_system):
        if not isinstance(lun, vim.HostScsiDisk):
            continue
        if (get_all_disks or
                # Filter by canonical name
                (disk_ids and lun.canonicalName in disk_ids) or
                # Filter by disk keys derived from scsi addresses
                lun.key in disk_keys):
            matching_disks.append(lun)
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in matching_disks])
    return matching_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    luns = props.get('storageDeviceInfo.scsiLun')
    if not luns:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(luns),
        ', '.join([lun.canonicalName for lun in luns])
    )
    matching_disks = [lun for lun in luns
                      if isinstance(lun, vim.HostScsiDisk) and
                      lun.canonicalName == disk_id]
    if not matching_disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    device_path = matching_disks[0].devicePath
    log.trace('[%s] device_path = %s', hostname, device_path)
    partition_info = _get_partition_info(storage_system, device_path)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the target disk by its canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disk groups
    in a ESXi host, filtered by the canonical names of their cache disks.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # No filter given, so nothing can match
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    storage_info = vsan_host_config.storageInfo
    if not storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    mappings = storage_info.diskMapping
    if not mappings:
        return []
    selected_groups = [mapping for mapping in mappings
                       if get_all_disk_groups or
                       mapping.ssd.canonicalName in cache_disk_ids]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [m.ssd.canonicalName for m in selected_groups]
    )
    return selected_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    actual_capacity = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity = sorted(capacity_disk_ids)
    if actual_capacity != expected_capacity:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity, expected_capacity))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if host_cache_manager:
        # A cache manager was supplied; query it directly
        props = get_properties_of_managed_object(host_cache_manager,
                                                 ['cacheConfigurationInfo'])
        if not props:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return props['cacheConfigurationInfo'][0]
    # No cache manager supplied; traverse from the host to find it
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.cacheConfigurationManager',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostCacheConfigurationManager,
                                       ['cacheConfigurationInfo'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results or not results[0].get('cacheConfigurationInfo'):
        log.trace('Host \'%s\' has no host cache', hostname)
        return None
    return results[0]['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in mebibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Return the hosts known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    # Delegate to the generic lister, filtering on the host managed object type.
    mo_type = vim.HostSystem
    return list_objects(service_instance, mo_type)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; when True all resource pools in scope are returned,
        ignoring ``resource_pool_names``

    return
        List of Resourcepool managed object references

    Raises VMwareObjectRetrievalError when no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Scope the search to a datacenter when one is given, otherwise start at
    # the inventory root folder.
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Bug fix: the original message interpolated ``selected_pools`` (always
        # the empty list here); report the requested names instead so the
        # error is actionable.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Return the resource pools known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    # Delegate to the generic lister, filtering on the resource pool type.
    mo_type = vim.ResourcePool
    return list_objects(service_instance, mo_type)
def list_networks(service_instance):
    '''
    Return the networks known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    # Delegate to the generic lister, filtering on the network type.
    mo_type = vim.Network
    return list_objects(service_instance, mo_type)
def list_vms(service_instance):
    '''
    Return the virtual machines known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    # Delegate to the generic lister, filtering on the virtual machine type.
    mo_type = vim.VirtualMachine
    return list_objects(service_instance, mo_type)
def list_folders(service_instance):
    '''
    Return the folders known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    # Delegate to the generic lister, filtering on the folder type.
    mo_type = vim.Folder
    return list_objects(service_instance, mo_type)
def list_dvs(service_instance):
    '''
    Return the distributed virtual switches known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    # Delegate to the generic lister, filtering on the DVS type.
    mo_type = vim.DistributedVirtualSwitch
    return list_objects(service_instance, mo_type)
def list_vapps(service_instance):
    '''
    Return the vApps known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    # Delegate to the generic lister, filtering on the vApp type.
    mo_type = vim.VirtualApp
    return list_objects(service_instance, mo_type)
def list_portgroups(service_instance):
    '''
    Return the distributed virtual portgroups known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    # Delegate to the generic lister, filtering on the DV portgroup type.
    mo_type = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, mo_type)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    Returns the task result on success. On failure the server-reported fault
    is re-raised and translated into the corresponding salt VMware exception.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Reading task.info queries the server and can itself raise vim faults.
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only emit a progress message every ``sleep_seconds`` iterations.
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep up to the next whole-second boundary so ``time_counter``
        # tracks elapsed wall-clock seconds.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state
        # Re-raise the server-reported fault so it can be matched against the
        # specific handlers below and translated into a salt exception.
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first localized fault message, when present, for a
            # more descriptive error.
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError when no VM matches and
    VMwareMultipleObjectsError when more than one does.
    '''
    if datacenter and not parent_ref:
        # Restrict the search to the named datacenter.
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Bug fix: the original message was built from two adjacent string
        # literals with no separating space ("...with thesame name...").
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the '
            'same name, please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises VMwareObjectRetrievalError when no folder can be determined from
    the given arguments.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Clone case: place the new VM in the base VM's parent folder.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Bug fix: previously this fell through with folder_object unbound and
        # raised an opaque NameError; raise a descriptive error instead.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'A folder could not be determined: no base VM, folder placement '
            'or datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    # Bug fix: the default placement=None previously crashed with a TypeError
    # on the membership tests below; treat it as an empty placement instead,
    # which falls through to the explicit error at the end.
    if placement is None:
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose resourcePool directly; clustered hosts
            # don't, so traverse host -> parent cluster -> resourcePool.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Bug fix: this branch previously interpolated placement['host'],
            # which cannot exist here (the 'host' branch would have been
            # taken) and raised a KeyError masking the real error.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit; returns a dict with the
    converted integer size and the unit 'KB'.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    # Multipliers to KB; vCenter requires an integer (long) value.
    factors = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    factor = factors.get(unit.lower())
    if factor is None:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': int(size * factor), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    # Map the requested action onto the vim method name and a human readable
    # task label; both actions share identical fault handling below.
    operations = {'on': ('PowerOn', 'power on'),
                  'off': ('PowerOff', 'power off')}
    if action not in operations:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    method_name, task_name = operations[action]
    try:
        task = getattr(virtual_machine, method_name)()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Only pass the host keyword when a real host system was supplied.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host
        Placement host of the virtual machine, vim.HostSystem object
    '''
    # Only pass the host keyword when a placement host was supplied.
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        # Asynchronous reconfigure; the result is awaited below.
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        # Asynchronous destroy; completion is awaited below.
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory without deleting its
    files (in contrast to ``delete_vm``, which destroys the VM).

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Bug fix: the docstring/log said "Destroying", copy-pasted from
    # delete_vm; this call only removes the VM from the inventory.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, consistent with the other helpers here.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
create_dvportgroup
|
python
|
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs)

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the portgroup creation task completes on the server side.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
|
Creates a distributed virtual portgroup on a distributed virtual switch
(dvs)
dvs_ref
The dvs reference
spec
Portgroup spec (vim.DVPortgroupConfigSpec)
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1296-L1323
|
[
"def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):\n '''\n Waits for a task to be completed.\n\n task\n The task to wait for.\n\n instance_name\n The name of the ESXi host, vCenter Server, or Virtual Machine that\n the task is being run on.\n\n task_type\n The type of task being performed. Useful information for debugging purposes.\n\n sleep_seconds\n The number of seconds to wait before querying the task again.\n Defaults to ``1`` second.\n\n log_level\n The level at which to log task information. Default is ``debug``,\n but ``info`` is also supported.\n '''\n time_counter = 0\n start_time = time.time()\n log.trace('task = %s, task_type = %s', task, task.__class__.__name__)\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n while task_info.state == 'running' or task_info.state == 'queued':\n if time_counter % sleep_seconds == 0:\n msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n time.sleep(1.0 - ((time.time() - start_time) % 1.0))\n time_counter += 1\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. 
Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n if task_info.state == 'success':\n msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n # task is in a successful state\n return task_info.result\n else:\n # task is in an error state\n try:\n raise task_info.error\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.fault.SystemError as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareSystemError(exc.msg)\n except vmodl.fault.InvalidArgument as exc:\n log.exception(exc)\n exc_message = exc.msg\n if exc.faultMessage:\n exc_message = '{0} ({1})'.format(exc_message,\n exc.faultMessage[0].message)\n raise salt.exceptions.VMwareApiError(exc_message)\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    # Refuse to load with an explanatory message when pyVmomi is absent.
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    # NOTE(review): the password and other arguments are interpolated into a
    # single command string; if cmdmod runs this through a shell, special
    # characters in the credentials could break or alter the command — verify
    # how cmdmod.run_all executes it.
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Proxy the command through vCenter ('host') to the target ESXi host.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    # 'quiet' keeps the credentials embedded in the command out of the logs.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Supports the ``userpass`` and ``sspi`` login mechanisms. On SSL
    certificate verification failure, retries the connection with
    verification disabled (two fallback strategies below).
    '''
    log.trace('Retrieving new service instance')
    token = None
    # Validate the mechanism-specific mandatory parameters up front.
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                # Kerberos/GSSAPI token used instead of username/password.
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Older pyVmomi versions don't accept b64token/mechanism keywords.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)

        try:
            # First fallback: retry with an unverified SSL context when the
            # failure was a certificate verification error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            # Second fallback: build an explicit TLSv1 context with
            # verification disabled.
            if 'certificate verify failed' in six.text_type(exc):
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the connection is closed when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Look up a VMware customization spec by name, for use when customizing
    a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Return the managed object reference matching the given type and name,
    or None when no match exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    content = get_inventory(si)
    view = content.viewManager.CreateContainerView(
        content.rootFolder, [obj_type], True)
    match = None
    for entry in view.view:
        if entry.name == obj_name:
            match = entry
            break
    return match
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Reuse pyVmomi's cached service instance when possible so repeated
    # calls do not re-authenticate against the same endpoint.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Cached session expired on the server side: drop it and rebuild
        # a fresh connection before returning.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Build a new SOAP stub pointing at a different endpoint path, reusing
    the session cookie of an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    ssl_context = None
    if sys.version_info[:3] > (2, 7, 8):
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE

    old_stub = service_instance._stub
    host = old_stub.host.split(':')[0]
    # Propagate the existing session cookie so the new stub shares the
    # authenticated session.
    session_cookie = old_stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=host,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=ssl_context)
    new_stub.cookie = old_stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Build a service instance object that shares the SOAP stub of the given
    managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    service_instance = vim.ServiceInstance('ServiceInstance')
    # Share the managed object's stub so both talk over the same session.
    service_instance._stub = mo_ref._stub
    return service_instance
def disconnect(service_instance):
    '''
    Close the connection to the vCenter server or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Return True when the connection targets a vCenter Server and False
    when it targets an ESXi host, based on the endpoint's apiType.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' is a vCenter server, 'HostAgent' an ESXi host;
    # anything else is unexpected.
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Return the ``about`` information block of the vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object, or None if
    no DVS with the given name exists.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object
    '''
    # Guard clause: bail out early when the name is not a known DVS.
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    for candidate in container.view:
        if candidate.name == dvs_name:
            return candidate
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Raises ImportError when the gssapi library is unavailable, and
    CommandExecutionError when no token could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    # Kerberos service principal of the form <principal>/<host>@<DOMAIN>
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # NOTE(review): in_token is never reassigned inside the loop, so at
        # most one step with an empty input token runs before the
        # 'no response' error below -- confirm whether multi-step
        # negotiation was intended.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only ESXi hosts (apiType 'HostAgent') expose the hardware details below.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # System / BIOS / CPU identity grains from the first host found.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grains report MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            # Network grains: one entry per VMkernel NIC, keyed by device name.
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host + '.' + domain; the dot is omitted when no domain is set.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the container view reference when done.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the full content (inventory) of a Service Instance object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Return the root folder of a vCenter inventory.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    # (all=True retrieves every property when no explicit list is given)
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view created above (only when we created it here)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose named property equals
    the given value (or whose stringified reference id matches), or None.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match against.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional managed object to search under (Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem). Defaults to the
        inventory rootFolder.
    '''
    candidates = get_mors_with_properties(
        service_instance, object_type,
        property_list=[property_name], container_ref=container_ref)
    for candidate in candidates:
        # The stringified reference looks like 'vim.X:id'; strip quotes.
        ref_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if property_value in (candidate[property_name], ref_id):
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Return a list of dicts, one per retrieved managed object, mapping each
    property name to its value plus an ``object`` key that holds the
    managed object reference itself.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to further filter results.

    container_ref
        An optional managed object to search under (Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem). Defaults to the
        inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''
    # Fetch the raw content; retry once on transient connection failures
    # (stale HTTP connection or a broken pipe).
    args = (service_instance, object_type)
    kwargs = {'property_list': property_list,
              'container_ref': container_ref,
              'traversal_spec': traversal_spec,
              'local_properties': local_properties}
    try:
        content = get_content(*args, **kwargs)
    except BadStatusLine:
        content = get_content(*args, **kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*args, **kwargs)

    object_list = []
    for obj in content:
        entry = {prop.name: prop.val for prop in obj.propSet}
        entry['object'] = obj.obj
        object_list.append(entry)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Return the requested properties of a managed object as a dict.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises VMwareApiError when no properties could be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # Fetch the object's name first; it is only used in logging and in the
    # error message below.
    try:
        mo_name = get_mors_with_properties(service_instance,
                                           type(mo_ref),
                                           container_ref=mo_ref,
                                           property_list=['name'],
                                           local_properties=True)[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    results = get_mors_with_properties(service_instance,
                                       type(mo_ref),
                                       container_ref=mo_ref,
                                       property_list=properties,
                                       local_properties=True)
    if not results:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return results[0]
def get_managed_object_name(mo_ref):
    '''
    Return the name of a managed object, or None when the name property
    is not present.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device for the given type name.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or 'e1000e'.

    Raises ValueError for any other type name.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Return the type name of a virtual network adapter device.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ValueError when the object matches no known adapter class.
    '''
    # Order matters: the more specific vmxnet2/vmxnet3 and e1000e classes
    # are checked before their base classes vmxnet and e1000.
    type_table = [
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    ]
    for adapter_cls, label in type_table:
        if isinstance(adapter_object, adapter_cls):
            return label
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Return the distributed virtual switches (DVSs) of a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs.
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Return the network folder of a datacenter.

    Raises VMwareObjectRetrievalError when the folder cannot be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits
    for the creation task to finish.

    NOTE(review): despite what earlier documentation claimed, this function
    does not return the newly created DVS reference -- it returns None.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the
        DVS name is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the creation task completes (raises on task failure).
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Reconfigure a distributed virtual switch with the given config spec
    and wait for the task to finish.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        reconfig_task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(reconfig_task, dvs_name, six.text_type(reconfig_task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enable or disable network I/O control (NIOC) on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Return the distributed virtual portgroups (dvportgroups) under a
    datacenter or a distributed virtual switch.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the dvss to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # datacenter -> networkFolder -> childEntity
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # parent is a distributed virtual switch: portgroups hang directly
        # off its 'portgroup' property
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Return the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference

    Raises VMwareObjectRetrievalError when no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=spec):
        # The uplink portgroup is identified by the SYSTEM/DVS.UPLINKPG tag.
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def update_dvportgroup(portgroup_ref, spec):
    '''
    Reconfigure a distributed virtual portgroup with the given spec and
    wait for the task to finish.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Destroy a distributed virtual portgroup and wait for the removal task
    to finish.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        destroy_task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(destroy_task, pg_name, six.text_type(destroy_task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns the standard switch networks found under a parent object.

    parent_ref
        The parent object reference; must be a datacenter.

    network_names
        Names of the networks to filter by. Default is None.

    get_all_networks
        When True, return every network under the parent regardless of the
        name filter. Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to reach networks.
    folder_traversal = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_traversal])
    results = get_mors_with_properties(service_instance,
                                       vim.Network,
                                       container_ref=parent_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    networks = []
    for entry in results:
        if get_all_networks or (network_names and
                                entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    # Docstring previously documented a nonexistent parameter
    # ('object_type' instead of 'vim_object'); fixed above.
    if properties is None:
        properties = ['name']
    item_list = get_mors_with_properties(service_instance, vim_object,
                                         properties)
    # Callers only consume the object names.
    return [item['name'] for item in item_list]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on vmodl runtime faults.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises VMwareApiError / VMwareRuntimeError on vim/vmodl faults and
    VMwareObjectRetrievalError when the manager is not available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # The attribute may be unset (e.g. on a direct ESXi connection).
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on vmodl runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Returns the added license object.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label the vSphere client displays.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if entity_type == 'uuid' and len(assignments) > 1:
        # Fixed typo in the log message ('Unexpectectedly' -> 'Unexpectedly')
        log.trace('Unexpectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # log.exception added for consistency with the other fault
            # handlers in this module (was previously missing here).
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns datacenter objects from a vCenter, optionally filtered by name.

    service_instance
        The Service Instance Object used for the lookup.

    datacenter_names
        Optional list of datacenter names used as a filter.

    get_all_datacenters
        When True, every datacenter is returned regardless of the filter.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or (datacenter_names and
                                   entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns the vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object used for the lookup.

    datacenter_name
        The name of the datacenter to retrieve.

    Raises VMwareObjectRetrievalError when no datacenter matches.
    '''
    found = get_datacenters(service_instance,
                            datacenter_names=[datacenter_name])
    if found:
        return found[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name

    Returns the created vim.Datacenter object.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The name of the cluster to be retrieved

    Raises VMwareObjectRetrievalError when the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Traverse datacenter -> hostFolder -> childEntity to reach the clusters.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx). Required.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx). Required.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing configuration.
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster (storage pod) names associated with a
    given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastore names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a mapping of datastore name to basic datastore information
    (name, type, url, capacity, free, used, usage, hosts) for every
    datastore of a service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts.

    Capacity/free/used are reported in MiB; usage is a percentage.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError when the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against ZeroDivisionError for datastores reporting zero capacity
    # (e.g. inaccessible/unmounted datastores).
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key renders as "'vim.HostSystem:host-123'"; extract the moid.
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Returns the managed object reference of the given type whose name matches
    ``obj_name``, or None when no match exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    # Recursive container view over the whole inventory tree.
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((obj for obj in container.view if obj.name == obj_name), None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Returns the managed object reference of the given type whose managed
    object id matches ``obj_moid``, or None when no match exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    # Recursive container view over the whole inventory tree.
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((obj for obj in container.view if obj._moId == obj_moid),
                None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            # NOTE(review): fully-qualified call (salt.utils.vmware.wait_for_task)
            # unlike the rest of the module; presumably intentional to allow
            # patching in tests — confirm before normalizing.
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Missing directories on a datastore are not an error; skip them.
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses.

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.

    Raises ArgumentValueError for unsupported reference types.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
    # Disk-based filtering only makes sense for hosts.
    if backing_disk_ids and not isinstance(reference, vim.HostSystem):
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\' when backing disk filter '
            'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Merge disk-derived names into the explicit name filter.
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on vmodl runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used for the lookup.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host, used for logging/errors. Retrieved if not given.

    Raises VMwareObjectRetrievalError when the storage system can't be found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem object.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo.

    storage_system
        The host's vim.HostStorageSystem.

    device_path
        Path of the device to inspect.
    '''
    try:
        # Query a single device; the API returns one info entry per path.
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec).

    storage_system
        The host's vim.HostStorageSystem.

    device_path
        Path of the device on which to add the partition.

    partition_info
        Existing vim.HostDiskPartitionInfo for the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed log call: logging uses %-style lazy formatting, so the previous
    # '{0}' placeholder was never interpolated (logging raised a formatting
    # error instead).
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved if not provided.

    Returns the created vim.Datastore reference.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # New partition consumes the remaining free space on the disk.
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises VMwareObjectRetrievalError when the datastore system can't be
    found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem object.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The removal is performed through the datastore
    system of the first host attached to the datastore.

    (Docstring previously said "Creates a VMFS datastore from a disk_id" —
    a copy-paste from create_vmfs_datastore; corrected.)

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises VMwareApiError when no attached hosts are found or on API faults.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            # Skip hosts that are not members of the requested cluster
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises VMwareObjectRetrievalError if the storage device info, multipath
    info or luns cannot be retrieved from the host.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.ScsiLun objects on an ESXi host (an empty list
    if none are found).

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        # Retrieve the storage system if it wasn't provided
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Builds a map of all vim.ScsiLun objects on an ESXi host, keyed by their
    scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref,
                                                                name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # scsi address -> lun key
    scsi_addr_to_key = _get_scsi_address_to_lun_key_map(
        service_instance, host_ref, storage_system, hostname)
    # lun key -> vim.ScsiLun object
    key_to_lun = {lun.key: lun
                  for lun in get_all_luns(host_ref, storage_system, hostname)}
    # Compose the two maps into scsi address -> vim.ScsiLun
    return {addr: key_to_lun[key]
            for addr, key in six.iteritems(scsi_addr_to_key)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # No filters and not retrieving everything: nothing to do
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    # Keep only disks (not other lun types) that pass one of the filters
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError if the host exposes no devices or if
    the disk cannot be found on the host.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the requested disk among the host's scsi luns
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    # Retrieve the host's scsi luns via the storage system property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # No cache disk ids and not retrieving everything: nothing to do
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (single) cache disk's canonical name
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the cache and capacity disks of a disk group match the
    expected ids; raises ArgumentValueError exceptions if the check fails.
    '''
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    actual_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_ids = sorted(capacity_disk_ids)
    if actual_ids != expected_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_ids, expected_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Retrieve the cache configuration via the host's
        # configManager.cacheConfigurationManager property
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        # A cache manager was provided; query its properties directly
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size of the swap in mebibytes (MiB).

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    # Thin wrapper over list_objects for the vim.HostSystem type
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; when True all resource pools in the container are returned

    return
        List of resource pool managed object references

    Raises VMwareObjectRetrievalError if no matching resource pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Restrict the search to the datacenter when one is given; otherwise
    # search the whole inventory starting at the root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the requested names: the previous message formatted
        # `selected_pools`, which is always the empty list here
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    # Thin wrapper over list_objects for the vim.ResourcePool type
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    # Thin wrapper over list_objects for the vim.Network type
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    # Thin wrapper over list_objects for the vim.VirtualMachine type
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    # Thin wrapper over list_objects for the vim.Folder type
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    # Thin wrapper over list_objects for the vim.DistributedVirtualSwitch type
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    # Thin wrapper over list_objects for the vim.VirtualApp type
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    # Thin wrapper over list_objects for the DistributedVirtualPortgroup type
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # NOTE: the task is polled approximately every second regardless of
        # sleep_seconds; the modulo below only gates how often progress is
        # logged
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep to the next whole-second boundary relative to start_time
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault to translate
        # it into the appropriate salt exception
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError when no VM with the given name is found
    and VMwareMultipleObjectsError when more than one matches.
    '''
    if datacenter and not parent_ref:
        # Restrict the search to the given datacenter
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set covering hardware, storage and runtime info
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the'
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises VMwareObjectRetrievalError when no folder can be determined.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # When cloning, place the new VM in the same folder as the base VM
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this fell through to the return statement with
        # folder_object unbound, raising a NameError; raise explicitly instead
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The folder could not be determined: no base VM, placement '
            'folder or datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    Resolves the resource pool and placement object (host or cluster) where a
    virtual machine should be created, using the strictest placement possible.

    service_instance
        Service instance object used to access the vCenter

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info: cluster, host or resourcepool name

    return
        Tuple of (resource pool object, placement object); the placement
        object is the host or cluster if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The host doesn't expose resourcePool directly; traverse
            # host -> parent compute resource -> resourcePool instead
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # BUGFIX: the previous message formatted placement['host'], which
            # is never present in this branch (elif) and raised a KeyError
            # instead of the intended error
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a long integer.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    return
        Dict of the form ``{'size': <int size in KB>, 'unit': 'KB'}``

    Raises ArgumentValueError for unsupported units.
    '''
    unit_lower = unit.lower()
    if unit_lower == 'gb':
        # vCenter needs long value
        target_size = int(size * 1024 * 1024)
    elif unit_lower == 'mb':
        target_size = int(size * 1024)
    elif unit_lower == 'kb':
        target_size = int(size)
    else:
        # BUGFIX: the previous message said 'The unit is not specified' even
        # when a unit was given but unsupported
        raise salt.exceptions.ArgumentValueError(
            'Invalid unit specified: \'{0}\''.format(unit))
    return {'size': target_size, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    # Resolve the action to the vim power method and a human readable task
    # name; the fault handling below is shared by both actions
    if action == 'on':
        power_method = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_method = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_method()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from a config spec.

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Pin the VM to a specific host only when a valid HostSystem was given;
    # otherwise let the resource pool decide placement.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the creation task finishes; its result is the new VM ref.
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file; on
    success it returns the vim.VirtualMachine managed object reference.

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host
        Placement host of the virtual machine, vim.HostSystem object
    '''
    # Build the RegisterVM_Task arguments once; the host is optional.
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    # Translate pyVmomi faults into salt exceptions; the specific
    # NoPermission fault is handled before the broader handlers.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfigure task finishes; the task result is returned.
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine (removes it from the inventory and deletes
    its files on the datastore, per Destroy_Task semantics).

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    # Translate pyVmomi faults into salt exceptions.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # No useful result from Destroy_Task; we only wait for completion.
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory.

    Unlike ``delete_vm``, this removes the VM from the inventory only; the
    VM's files are left untouched on the datastore.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed copy-paste: this logs (and does) an unregister, not a destroy.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        # UnregisterVM is synchronous; there is no task to wait on.
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # log.exception added for consistency with the sibling helpers
        # (delete_vm, update_vm, ...), which all log before translating.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
update_dvportgroup
|
python
|
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message: 'portgrouo' -> 'portgroup'.
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use the task's class name as the label while waiting for completion.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
|
Updates a distributed virtual portgroup
portgroup_ref
The portgroup reference
spec
Portgroup spec (vim.DVPortgroupConfigSpec)
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1326-L1351
|
[
"def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):\n '''\n Waits for a task to be completed.\n\n task\n The task to wait for.\n\n instance_name\n The name of the ESXi host, vCenter Server, or Virtual Machine that\n the task is being run on.\n\n task_type\n The type of task being performed. Useful information for debugging purposes.\n\n sleep_seconds\n The number of seconds to wait before querying the task again.\n Defaults to ``1`` second.\n\n log_level\n The level at which to log task information. Default is ``debug``,\n but ``info`` is also supported.\n '''\n time_counter = 0\n start_time = time.time()\n log.trace('task = %s, task_type = %s', task, task.__class__.__name__)\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n while task_info.state == 'running' or task_info.state == 'queued':\n if time_counter % sleep_seconds == 0:\n msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n time.sleep(1.0 - ((time.time() - start_time) % 1.0))\n time_counter += 1\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. 
Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n if task_info.state == 'success':\n msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n # task is in a successful state\n return task_info.result\n else:\n # task is in an error state\n try:\n raise task_info.error\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.fault.SystemError as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareSystemError(exc.msg)\n except vmodl.fault.InvalidArgument as exc:\n log.exception(exc)\n exc_message = exc.msg\n if exc.faultMessage:\n exc_message = '{0} ({1})'.format(exc_message,\n exc.faultMessage[0].message)\n raise salt.exceptions.VMwareApiError(exc_message)\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    # Guard clause: bail out early with the load-failure reason.
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
        ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting through a vCenter: -h selects the target ESXi host.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    # NOTE(review): the password is interpolated into the command string.
    # output_loglevel='quiet' keeps it out of salt's logs, but if the command
    # is executed through a shell it may still be visible in the process
    # argument list — consider passing arguments as a list; confirm with
    # cmdmod.run_all's shell behavior before changing.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Supports two mechanisms: 'userpass' (username/password must both be
    given) and 'sspi' (principal/domain must both be given; a GSSAPI token
    is obtained first). On SSL verification failures the connection is
    retried with certificate verification disabled.
    '''
    log.trace('Retrieving new service instance')
    token = None
    # Validate the mechanism-specific mandatory parameters up front.
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Older pyVmomi versions don't accept b64token/mechanism kwargs.
        # NOTE(review): exc.message exists only on Python 2; on Python 3 this
        # handler would itself raise AttributeError. Also, if the message
        # check fails nothing is raised here and service_instance stays
        # unbound — confirm intended.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First retry: SSL verification failure — reconnect with an
            # unverified SSL context (API differs across Python versions).
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            # Second retry: a differently-worded verification failure —
            # build an explicit TLSv1 context with verification disabled.
            if 'certificate verify failed' in six.text_type(exc):
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is closed when the interpreter exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Look the spec up by name through the customization spec manager and
    # hand the result straight back (no intermediate variable).
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    content = get_inventory(si)
    # Recursive container view over the whole inventory tree, filtered to
    # the requested managed-object type.
    view_ref = content.viewManager.CreateContainerView(
        content.rootFolder, [obj_type], True)
    # First entity whose name matches, or None when nothing matches.
    return next((entity for entity in view_ref.view
                 if entity.name == obj_name), None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the
    service instance object. Reuses pyVim's cached session when it points at
    the same host and the process is not a proxy; reconnects when the cached
    session is stale.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # GetSi() returns pyVim's process-wide cached service instance, if any.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and authenticate from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    # Reuse the existing connection's endpoint and session cookie so the new
    # stub shares the authenticated session (no re-login needed).
    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional. Note that the
        default '<unnamed>' is truthy, so the fallback below only triggers
        when a caller explicitly passes a falsy name (e.g. None or '').
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a ServiceInstance that shares the managed object's SOAP stub,
    # i.e. the same authenticated session.
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        # pyVim's Disconnect terminates the session on the endpoint.
        Disconnect(service_instance)
    # Translate pyVmomi faults into salt exceptions.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server
    ('VirtualCenter' api type) and False if the connection is made to an ESXi
    host ('HostAgent' api type). Any other api type raises VMwareApiError.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        # Defensive: unknown endpoint type is surfaced as an API error.
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host (the AboutInfo object of
    the endpoint's content).

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        return service_instance.content.about
    # Translate pyVmomi faults into salt exceptions.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object, or None when
    no switch with that name exists.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object
    '''
    # NOTE(review): list_dvs() already enumerates switch names, then the
    # container view scans the inventory a second time; also the view is
    # never destroyed — presumably left to session cleanup. Confirm before
    # changing.
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        for item in container.view:
            if item.name == dvs_name:
                return item
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection.

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    # NOTE(review): in_token is never reassigned, so this loop executes its
    # body at most once — the first step() either yields a token (returned
    # base64-encoded below) or the 'no response from server' error is raised.
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance
    is a HostAgent type (i.e. a direct ESXi connection); otherwise an empty
    dict is returned.

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        # A HostAgent connection has exactly the local host in its inventory,
        # so view.view[0] below is the ESXi host itself.
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # NOTE(review): bytes -> MB; on Python 3 this true division yields
            # a float (the module does not import __future__ division).
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Collect per-vmkernel-NIC addressing info.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn = host + '.' + domain, dot omitted when domain is empty.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the reference to the container view.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (ServiceContent) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    # Translate pyVmomi faults into salt exceptions.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.
    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html
    service_instance
        The Service Instance from which to obtain content.
    obj_type
        The type of content to obtain.
    property_list
        An optional list of object properties to used to return even more filtered content results.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.
    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    # Remember whether we created a container view here so that we (and only
    # we) destroy it at the end.
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference whose given property matches
    ``property_value`` (either by the property itself or by the stringified
    moid of the object). Returns None when nothing matches.
    service_instance
        The Service Instance from which to obtain managed object references.
    object_type
        The type of content for which to obtain managed object references.
    property_value
        The name of the property for which to obtain the managed object reference.
    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Fetch all candidates carrying the requested property, then scan them.
    candidates = get_mors_with_properties(service_instance, object_type, property_list=[property_name], container_ref=container_ref)
    for candidate in candidates:
        moid = six.text_type(candidate.get('object', '')).strip('\'"')
        if property_value in (candidate[property_name], moid):
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.
    service_instance
        The Service Instance from which to obtain managed object references.
    object_type
        The type of content for which to obtain managed object references.
    property_list
        An optional list of object properties used to return even more filtered managed object reference results.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec
    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    # Retry once on transient connection hiccups (stale HTTP connection or a
    # broken pipe); any other IOError is re-raised.
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)
    # Flatten each result's propSet into a plain dict; the managed object
    # reference itself is stored under the 'object' key.
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way (local properties only, no traversal).
    mo_ref
        The managed object reference.
    properties
        List of properties of the managed object to retrieve.
    Raises
        salt.exceptions.VMwareApiError if the properties could not be
        retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    # First fetch the object's name purely for logging/error messages; fall
    # back to a placeholder if the object has no 'name' property.
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Return the ``name`` property of a managed object, or None when the
    object carries no name.
    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device object for the given type
    name.
    adapter_type
        The adapter type name; one of ``vmxnet``, ``vmxnet2``, ``vmxnet3``,
        ``e1000`` or ``e1000e``.
    Raises
        ValueError when the adapter type name is unknown.
    '''
    # Map each supported type name to its pyVmomi device class; a dict
    # lookup replaces the previous if/elif chain.
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type not in adapter_classes:
        raise ValueError('An unknown network adapter object type name.')
    # Instantiate a fresh device object on every call.
    return adapter_classes[adapter_type]()
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name for a device object.
    adapter_object
        The adapter object from which to obtain the network adapter type.
    Raises
        ValueError when the object is not a recognized adapter type.
    '''
    # NOTE(review): the vmxnet2/vmxnet3 checks come before the plain vmxnet
    # check — presumably because they subclass VirtualVmxnet, so checking
    # the base class first would shadow them; keep this ordering.
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2):
        return 'vmxnet2'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3):
        return 'vmxnet3'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet):
        return 'vmxnet'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000e):
        return 'e1000e'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000):
        return 'e1000'
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Return the distributed virtual switches (DVSs) of a datacenter.
    dc_ref
        The parent datacenter reference.
    dvs_names
        Names of the DVSs to return. Default is None.
    get_all_dvss
        If True, return every DVS in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs.
    folder_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    results = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            results.append(entry['object'])
    return results
def get_network_folder(dc_ref):
    '''
    Return the network folder managed object of a datacenter.
    dc_ref
        The datacenter reference.
    Raises
        salt.exceptions.VMwareObjectRetrievalError when the folder could
        not be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Single-hop traversal: datacenter -> networkFolder.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.Datacenter,
        path='networkFolder',
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter.
    Blocks until the creation task completes.
    dc_ref
        The parent datacenter reference.
    dvs_name
        The name of the DVS to create.
    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the
        name is built here.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
    # The spec's name always wins over whatever was passed in configSpec.
    dvs_create_spec.configSpec.name = dvs_name
    # The DVS is created inside the datacenter's network folder.
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Wait for the asynchronous vCenter task to finish before returning.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.
    Blocks until the reconfigure task completes.
    dvs_ref
        The DVS reference.
    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Wait for the asynchronous vCenter task to finish before returning.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (network I/O control) is enabled on a DVS.
    dvs_ref
        The DVS reference.
    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        # Synchronous call; no task to wait for.
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Return distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.
    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.
    portgroup_names
        The names of the portgroups to return. Default is None.
    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    # Pick the traversal path based on the parent type.
    if isinstance(parent_ref, vim.Datacenter):
        # datacenter -> networkFolder -> childEntity
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # distributed virtual switch -> portgroup
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    results = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            results.append(entry['object'])
    return results
def get_uplink_dvportgroup(dvs_ref):
    '''
    Return the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).
    dvs_ref
        The dvs reference.
    Raises
        salt.exceptions.VMwareObjectRetrievalError when no uplink
        portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.DistributedVirtualSwitch,
        path='portgroup',
        skip=False)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by its SYSTEM/DVS.UPLINKPG tag.
    matches = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            matches.append(entry['object'])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return matches[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs). Blocks until the creation task completes.
    dvs_ref
        The dvs reference
    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Wait for the asynchronous vCenter task to finish before returning.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup.
    Blocks until the destroy task completes.
    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Wait for the asynchronous vCenter task to finish before returning.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Return networks of standard switches.
    The parent object can be a datacenter.
    parent_ref
        The parent object reference. A datacenter object.
    network_names
        The name of the standard switch networks. Default is None.
    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to reach networks.
    folder_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_spec])
    results = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            results.append(entry['object'])
    return results
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.
    service_instance
        The Service Instance for which to obtain a list of objects.
    vim_object
        The type of content for which to obtain information.
    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Only the 'name' of each retrieved object is returned, regardless of
    # any additional requested properties (preserved historical behaviour).
    return [item['name']
            for item in get_mors_with_properties(service_instance,
                                                 vim_object, properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.
    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.
    service_instance
        The Service Instance Object from which to obtain the license manager.
    Raises
        salt.exceptions.VMwareObjectRetrievalError when the manager is not
        available on the vCenter.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.
    service_instance
        The Service Instance Object from which to obtain the licenses.
    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license and returns the resulting license info object.
    service_instance
        The Service Instance Object.
    key
        The key of the license to add.
    description
        The description of the license to add.
    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label the vSphere client displays.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.
    service_instance
        The Service Instance Object from which to obtain the licenses.
    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.
    entity_name
        Entity name used in logging. Must be provided.
        Default is None.
    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # NOTE(review): entity_name is guaranteed truthy at this point (the
        # check above raised otherwise), so this branch is always taken.
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        # NOTE(review): assignments[0] would raise IndexError if the query
        # returned no assignments — presumably a vCenter always has at
        # least one; confirm before hardening.
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license info.
    service_instance
        The Service Instance Object from which to obtain the licenses.
    license_key
        The key of the license to add.
    license_name
        The description of the license to add.
    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.
    entity_name
        Entity name used in logging.
        Default is None.
    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # No entity given: assign to the vCenter itself, identified by its
        # instance UUID.
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before re-raising, for consistency with every other API
            # fault handler in this module (was previously missing here).
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Return the names of all datacenters of a given service instance.
    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    names = list_objects(service_instance, vim.Datacenter)
    return names
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Return datacenter managed objects of a vCenter.
    service_instance
        The Service Instance Object from which to obtain the datacenters.
    datacenter_names
        List of datacenter names to filter by. Default value is None.
    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    results = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            results.append(entry['object'])
    return results
def get_datacenter(service_instance, datacenter_name):
    '''
    Return the vim.Datacenter managed object with the given name.
    service_instance
        The Service Instance Object from which to obtain the datacenter.
    datacenter_name
        The datacenter name.
    Raises
        salt.exceptions.VMwareObjectRetrievalError when no datacenter with
        that name exists.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder and returns the
    new datacenter object.
    .. versionadded:: 2017.7.0
    service_instance
        The Service Instance Object
    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.
    dc_ref
        The datacenter reference
    cluster
        The name of the cluster to be retrieved
    Raises
        salt.exceptions.VMwareObjectRetrievalError when the cluster is not
        found in the datacenter.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Traverse datacenter -> hostFolder -> childEntity to reach the clusters.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.
    dc_ref
        The parent datacenter reference.
    cluster_name
        The cluster name.
    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        # CreateClusterEx is synchronous here; no task to wait for.
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster with a new config spec.
    Blocks until the reconfigure task completes.
    cluster_ref
        The cluster reference.
    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing config rather than
        # replacing it wholesale.
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of clusters (names, as returned by ``list_objects``)
    associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore clusters (names, as returned by
    ``list_objects``) associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastores (names, as returned by ``list_objects``)
    associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dictionary of datastores associated with a given service
    instance, keyed by datastore name. Each entry contains basic information
    about the datastore: name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # Build the per-datastore summary for every datastore the service
    # instance can see.
    return {
        ds_name: list_datastore_full(service_instance, ds_name)
        for ds_name in list_objects(service_instance, vim.Datastore)
    }
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError if the datastore does not exist.
    '''
    ds_ref = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not ds_ref:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    summary = ds_ref.summary
    # Sizes are reported in bytes by the API; expose them in MiB
    capacity_mib = summary.capacity / 1024 / 1024
    free_mib = summary.freeSpace / 1024 / 1024
    used_mib = capacity_mib - free_mib
    info = {
        'name': str(summary.name).replace("'", ""),
        'type': str(summary.type).replace("'", ""),
        'url': str(summary.url).replace("'", ""),
        'capacity': capacity_mib,
        'free': free_mib,
        'used': used_mib,
        'usage': (float(used_mib) / float(capacity_mib)) * 100,
        'hosts': [],
    }
    # Resolve each attached host's moid to the host's display name
    for mount in ds_ref.host:
        moid = str(mount.key).replace("'", "").split(":", 1)[1]
        host_ref = get_mor_by_moid(service_instance, vim.HostSystem, moid)
        info['hosts'].append(host_ref.name)
    return info
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.
    Returns None when no object with that name is found.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # First object matching the requested name, or None if there is none
    return next((obj for obj in container.view if obj.name == obj_name), None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id.
    Returns None when no object with that id is found.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # First object matching the requested moid, or None if there is none
    return next((obj for obj in container.view if obj._moId == obj_moid),
                None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects

    Raises VMwareApiError on API faults (including insufficient permissions)
    and VMwareRuntimeError on vmodl runtime faults.
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Search each datastore at '[<datastore>] <directory>'
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Best effort: datastores where the directory doesn't exist are
            # silently skipped
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.

    Raises ArgumentValueError for unsupported reference types.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Build a new list instead of extending in place so the caller's
        # datastore_names argument is not mutated as a side effect.
        if datastore_names:
            datastore_names = datastore_names + disk_datastores
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises VMwareApiError on API faults (including insufficient permissions)
    and VMwareRuntimeError on vmodl runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object from which to obtain the storage system.

    host_ref
        Reference to the ESXi host (vim.HostSystem).

    hostname
        Name of the host. This argument is optional; retrieved from
        host_ref if not provided.

    Raises VMwareObjectRetrievalError when the storage system could not
    be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    # NOTE(review): 'systemFile' seems to be requested only so the property
    # collector returns the object itself — confirm before changing
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The host's storage system (vim.HostStorageSystem).

    device_path
        The device path whose partition info is retrieved.

    Raises VMwareApiError on API faults (including insufficient permissions)
    and VMwareRuntimeError on vmodl runtime faults.
    '''
    try:
        # A single device path is queried, so exactly one result is expected
        partition_infos = \
                storage_system.RetrieveDiskPartitionInfo(
                    devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The host's storage system (vim.HostStorageSystem).

    device_path
        The device path of the disk to be partitioned.

    partition_info
        The current partition info (vim.HostDiskPartitionInfo) of the disk.

    Raises VMwareObjectNotFoundError when the disk has no free partition,
    VMwareNotFoundError when the newly computed partition cannot be
    identified, VMwareApiError on API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition at the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed: the logging module uses %-style substitution; the previous
    # '{0}' placeholder was never expanded in the emitted message.
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's storage system (vim.HostStorageSystem). Optional;
        retrieved from host_ref if not provided.

    Raises VMwareApiError on API faults (including insufficient permissions)
    and VMwareRuntimeError on vmodl runtime faults.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Use the remainder of the disk for the new vmfs partition
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises VMwareObjectRetrievalError when the datastore system could not
    be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The removal is performed through the datastore
    system of the first host the datastore is attached to.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises VMwareApiError if no host is attached to the datastore or on API
    faults (including insufficient permissions), and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Removal is issued via the first attached host's datastore system
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    properties = ['name']
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # 'parent' is needed below to verify cluster membership; this
            # also implicitly checks that the cluster exists
            properties.append('parent')
    else:
        # No datacenter given: search from the root folder
        start_point = get_root_folder(service_instance)
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for host in hosts:
        # Keep a host only when it passes the cluster-membership filter
        # (when requested) and either all hosts are wanted or its name
        # was explicitly asked for.
        if cluster_name:
            parent = host['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host['name'] in host_names:
            filtered_hosts.append(host['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises VMwareObjectRetrievalError when the storage device info, the
    multipath info or the luns could not be retrieved; VMwareApiError on
    API faults and VMwareRuntimeError on vmodl runtime faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    Returns an empty list when the host reports no scsi luns. Raises
    VMwareObjectRetrievalError when the storage system or device info
    could not be retrieved, VMwareApiError on API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # scsi address -> lun key
    lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        si, host_ref, storage_system, hostname)
    # lun key -> vim.ScsiLun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Combine the two maps: scsi address -> vim.ScsiLun object
    scsi_addr_to_lun = {}
    for scsi_addr, lun_key in six.iteritems(lun_key_by_scsi_addr):
        scsi_addr_to_lun[scsi_addr] = lun_by_key[lun_key]
    return scsi_addr_to_lun
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Nothing to filter by, so nothing can match
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk (vim.HostDiskPartitionInfo).

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError when the host's devices or the
    requested disk could not be found.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the scsi disk with the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError when the host's devices or the
    requested disk could not be found, VMwareApiError on API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the scsi disk with the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their canonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.

    Raises VMwareObjectRetrievalError when the host's vsan config or storage
    info could not be found, VMwareApiError on API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # No cache disk filter given, so nothing can match
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (single) cache disk's canonical name
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails

    disk_group
        The vim.VsanHostDiskMapping object to check.

    cache_disk_id
        The expected canonical name of the disk group's cache disk.

    capacity_disk_ids
        The expected canonical names of the disk group's capacity disks.

    Returns True when both the cache disk and the capacity disks match.
    Raises ArgumentValueError otherwise.
    '''
    # Idiomatic '!=' instead of 'not ... =='
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    # Capacity disks must match exactly, ignoring order
    non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd]
    if sorted(non_ssd_disks) != sorted(capacity_disk_ids):
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(sorted(non_ssd_disks),
                      sorted(capacity_disk_ids)))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Retrieves the host cache configuration of an ESXi host.

    Returns the first cache configuration info object if a host cache is
    configured on the specified host, otherwise returns None.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        Optional vim.HostCacheConfigurationManager of the host. If not
        provided, it is looked up via the property collector.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if host_cache_manager:
        # A manager was supplied: read the property directly from it.
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
    # No manager supplied: traverse from the host to its cache configuration
    # manager and fetch the property from there.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.cacheConfigurationManager',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostCacheConfigurationManager,
                                       ['cacheConfigurationInfo'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results or not results[0].get('cacheConfigurationInfo'):
        log.trace('Host \'%s\' has no host cache', hostname)
        return None
    return results[0]['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host.

    host_ref
        The vim.HostSystem object representing the host on which the cache
        is configured.

    datastore_ref
        The vim.Datastore object representing the datastore that will back
        the host cache.

    swap_size_MiB
        The size of the swap, in mebibytes.

    host_cache_manager
        Optional vim.HostCacheConfigurationManager of the host. If not
        provided, it is retrieved from the host's config manager.

    Returns True on success.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        host_props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not host_props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = host_props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    cache_spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', cache_spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(cache_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Lists the host systems associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    host_type = vim.HostSystem
    return list_objects(service_instance, host_type)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        List of resource pool names to retrieve

    datacenter_name
        Name of the datacenter where the resource pool is available.
        If not given the whole inventory is searched.

    get_all_resource_pools
        Boolean; when True all resource pools in the container are returned

    return
        List of vim.ResourcePool managed object references

    Raises VMwareObjectRetrievalError when no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search under the datacenter if one was given, otherwise under the
    # inventory root folder.
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = [pool['object'] for pool in resource_pools
                      if get_all_resource_pools or
                      (pool['name'] in resource_pool_names)]
    if not selected_pools:
        # Report the names that were requested. (Previously this formatted
        # `selected_pools` - always empty here - so the message never showed
        # which pools were actually being looked for.)
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Lists the resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    pool_type = vim.ResourcePool
    return list_objects(service_instance, pool_type)
def list_networks(service_instance):
    '''
    Lists the networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    network_type = vim.Network
    return list_objects(service_instance, network_type)
def list_vms(service_instance):
    '''
    Lists the virtual machines associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vm_type = vim.VirtualMachine
    return list_objects(service_instance, vm_type)
def list_folders(service_instance):
    '''
    Lists the folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    folder_type = vim.Folder
    return list_objects(service_instance, folder_type)
def list_dvs(service_instance):
    '''
    Lists the distributed virtual switches associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    dvs_type = vim.DistributedVirtualSwitch
    return list_objects(service_instance, dvs_type)
def list_vapps(service_instance):
    '''
    Lists the vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vapp_type = vim.VirtualApp
    return list_objects(service_instance, vapp_type)
def list_portgroups(service_instance):
    '''
    Lists the distributed virtual portgroups associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    portgroup_type = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, portgroup_type)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    Returns the task's result on success. On failure the task's stored fault
    is translated into the corresponding salt VMware exception.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Initial read of the task state; pyVmomi faults raised while reading the
    # task info are translated into salt exceptions.
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll until the task leaves the 'running'/'queued' states.
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only emit a progress message every `sleep_seconds` iterations
        # to avoid flooding the log.
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary relative to start_time,
        # keeping the polling aligned to ~1s ticks regardless of work done.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state
        # Re-raise the task's stored fault so the handlers below can translate
        # it into the matching salt exception type.
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Include the first detailed fault message, when present.
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Gets a virtual machine by name and returns its property dictionary
    (including the ``object`` key with the vim.VirtualMachine reference).

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Optional datacenter name to restrict the search to.

    vm_properties
        Optional list of vm properties to retrieve; a default set covering
        hardware, storage and guest information is used when omitted.

    traversal_spec
        Optional Traversal Spec object(s) for searching.

    parent_ref
        Optional container reference object for searching under a given object.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    all_vms = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    matches = [entry for entry in all_vms if entry['name'] == name]
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    if len(matches) > 1:
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the'
            'same name, please specify a container.']))
    return matches[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary; may contain a 'folder' key naming the folder

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises VMwareObjectRetrievalError when no folder can be determined.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Clone case: place the new VM in the same folder as the base VM.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this case fell through to the return statement and
        # raised an UnboundLocalError; fail with an explicit error instead.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'A folder could not be retrieved: no base vm, folder placement '
            'or datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object to access the vCenter

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Tuple of (resource pool object, cluster/host object) if any applies
    '''
    log.trace('Retrieving placement information')
    # Treat a missing placement dict like an empty one, so the explicit
    # 'Placement is not defined.' error is raised below instead of a
    # TypeError from the membership tests.
    if placement is None:
        placement = {}
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The host does not expose 'resourcePool' directly; traverse
            # host -> parent compute resource -> resourcePool instead.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # NOTE: this previously formatted placement['host'], which raised
            # a KeyError in this branch; report the resource pool instead.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit.

    Returns a dict of the form ``{'size': <int size in KB>, 'unit': 'KB'}``.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    # vCenter needs a plain (long) integer value, keyed by unit multiplier.
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    factor = multipliers.get(unit.lower())
    if factor is None:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': int(size * factor), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers a virtual machine on or off and waits for the operation to finish.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine; 'on' or 'off'
    '''
    # Resolve the power method up front so the fault handling below is shared.
    if action == 'on':
        power_method = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_method = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_method()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from a config spec.

    vm_name
        Name of the virtual machine to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference the machine is created under

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Optional host object where the machine will be placed

    return
        Virtual Machine managed object reference
    '''
    # Only pass the host argument when a concrete vim.HostSystem was supplied.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file; on
    success it returns the vim.VirtualMachine managed object reference.

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Optional placement host of the virtual machine, vim.HostSystem object
    '''
    # Assemble the call arguments once; the host is only passed when given.
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Reconfigures a virtual machine with the given config spec and returns
    the task result.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to apply
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(reconfig_task, vm_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys a virtual machine, deleting its files from the datastore.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters a virtual machine from the inventory. Unlike ``delete_vm``,
    this does not delete the virtual machine's files from the datastore.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # The docstring and trace message previously said 'Destroying' (copied
    # from delete_vm); this call only removes the VM from the inventory.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, consistent with the other VM operations.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_networks
|
python
|
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    # Only datacenters are supported as search roots.
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse Datacenter -> networkFolder -> childEntity to reach the
    # network objects instead of the default 'traverse all' spec.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    # Keep every network when get_all_networks is set; otherwise only those
    # whose name is in network_names.
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
|
Returns networks of standard switches.
The parent object can be a datacenter.
parent_ref
The parent object reference. A datacenter object.
network_names
The name of the standard switch networks. Default is None.
get_all_networks
Boolean indicates whether to return all networks in the parent.
Default is False.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1379-L1423
|
[
"def get_mors_with_properties(service_instance, object_type, property_list=None,\n container_ref=None, traversal_spec=None,\n local_properties=False):\n '''\n Returns a list containing properties and managed object references for the managed object.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n\n object_type\n The type of content for which to obtain managed object references.\n\n property_list\n An optional list of object properties used to return even more filtered managed object reference results.\n\n container_ref\n An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,\n ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory\n rootFolder.\n\n traversal_spec\n An optional TraversalSpec to be used instead of the standard\n ``Traverse All`` spec\n\n local_properties\n Flag specigying whether the properties to be retrieved are local to the\n container. If that is the case, the traversal spec needs to be None.\n '''\n # Get all the content\n content_args = [service_instance, object_type]\n content_kwargs = {'property_list': property_list,\n 'container_ref': container_ref,\n 'traversal_spec': traversal_spec,\n 'local_properties': local_properties}\n try:\n content = get_content(*content_args, **content_kwargs)\n except BadStatusLine:\n content = get_content(*content_args, **content_kwargs)\n except IOError as exc:\n if exc.errno != errno.EPIPE:\n raise exc\n content = get_content(*content_args, **content_kwargs)\n\n object_list = []\n for obj in content:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n properties['object'] = obj.obj\n object_list.append(properties)\n log.trace('Retrieved %s objects', len(object_list))\n return object_list\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n",
"def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):\n '''\n Retrieves the service instance from a managed object.\n\n me_ref\n Reference to a managed object (of type vim.ManagedEntity).\n\n name\n Name of managed object. This field is optional.\n '''\n if not name:\n name = mo_ref.name\n log.trace('[%s] Retrieving service instance from managed object', name)\n si = vim.ServiceInstance('ServiceInstance')\n si._stub = mo_ref._stub\n return si\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param protocol: Connection protocol; defaults to https
    :param port: TCP port; defaults to 443
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Fall back to the standard https/443 endpoint when unspecified.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    if esxi_host:
        # 'host' is a vCenter; '-h' names the ESXi instance to manipulate.
        target_part = '-s {0} -h {1}'.format(host, esxi_host)
    else:
        # Connecting directly to an ESXi server.
        target_part = '-s {0}'.format(host)
    esx_cmd += ' {0} -u {1} -p \'{2}\' ' \
               '--protocol={3} --portnumber={4} {5}'.format(target_part,
                                                            user,
                                                            pwd,
                                                            protocol,
                                                            port,
                                                            cmd)
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        Hostname of the vCenter/ESXi endpoint.
    username / password
        Credentials; both mandatory when ``mechanism='userpass'``.
    protocol / port
        Connection protocol and TCP port passed through to SmartConnect.
    mechanism
        Either ``userpass`` or ``sspi`` (Kerberos via gssapi token).
    principal / domain
        Kerberos service principal and user domain; both mandatory when
        ``mechanism='sspi'``.
    '''
    log.trace('Retrieving new service instance')
    token = None
    # Validate the parameter combinations required by each login mechanism
    # before attempting any network connection.
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    # First connection attempt with default SSL verification.
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Old pyVmomi releases don't accept b64token/mechanism kwargs.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # Fallback #1: certificate verification failed — retry with an
            # unverified SSL context (name differs across Python versions).
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Fallback #2: force TLSv1 with verification disabled.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is closed when the interpreter exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Look up a VMware customization spec by name, for use when customizing
    a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)
    customization_spec_name
        Name of the customization spec
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)
    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)
    obj_name
        Name of the object

    Returns the first matching managed object reference, or None if no
    object with that name exists.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Destroy the server-side view once we are done iterating it; leaving
    # views around leaks session objects on the vCenter (get_content in this
    # module destroys its views the same way).
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.
    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``
    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``
    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.
    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.
    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.
    principal
        Kerberos service principal. Required if mechanism is ``sspi``
    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # pyVim caches the last service instance; reuse it when it still points
    # at the same host, otherwise drop it and reconnect.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: tear it down and establish a fresh one.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Build a new SOAP stub that reuses an existing connection's session but
    points at a different endpoint path.

    service_instance
        The Service Instance.
    path
        Path of the new stub.
    ns
        Namespace of the new stub. Default value is None.
    version
        Version of the new stub. Default value is None.
    '''
    # Python 2.7.9+ performs strict handshake validation by default; disable
    # hostname checking and client-side cert verification for this stub.
    ssl_context = None
    if sys.version_info[:3] > (2, 7, 8):
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE
    old_stub = service_instance._stub
    hostname = old_stub.host.split(':')[0]
    # Reuse the authenticated session cookie from the existing stub.
    session_cookie = old_stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=ssl_context)
    new_stub.cookie = old_stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Build a service instance handle that shares the SOAP stub of a managed
    object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).
    name
        Name of managed object. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    service_instance = vim.ServiceInstance('ServiceInstance')
    # Share the managed object's stub so the same session is used.
    service_instance._stub = mo_ref._stub
    return service_instance
def disconnect(service_instance):
    '''
    Close the session to the vCenter server or ESXi host, translating
    pyVmomi faults into salt exceptions.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Return True when connected to a vCenter Server and False when connected
    to an ESXi host; raise for any other endpoint type.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        "Unexpected api type '{0}' . Supported types: "
        "'VirtualCenter/HostAgent'".format(api_type))
def get_service_info(service_instance):
    '''
    Return the ``about`` information of the connected vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None if no switch with that name exists
    '''
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    # NOTE(review): this container view is never destroyed — consider
    # cleaning it up once iteration is done.
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    for switch in container.view:
        if switch.name == dvs_name:
            return switch
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    Returns a base64-encoded token (bytes on PY2, str via to_bytes on PY3).
    Raises CommandExecutionError if no token could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    # NOTE(review): in_token is never reassigned inside this loop, so the
    # loop effectively runs one step: either an out_token is produced and
    # returned, or the 'no response from server' error is raised below.
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        # NOTE(review): unreachable — the while condition already excludes
        # ctx.established here.
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    # Reached only if the context established without ever yielding a token.
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only direct ESXi connections (HostAgent) expose host hardware this way.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the (single) ESXi host behind this HostAgent.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; convert to MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            # Build the per-interface IP/MAC grains from the host's vnics.
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host + '.' + domain, with the dot omitted when no domain.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Fetch and return the full inventory content of a Service Instance.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    return service_instance.RetrieveContent()
def get_root_folder(service_instance):
    '''
    Return the inventory root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    try:
        log.trace('Retrieving root folder')
        content = service_instance.RetrieveContent()
        return content.rootFolder
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.
    obj_type
        The type of content to obtain.
    property_list
        An optional list of object properties to used to return even more filtered content results.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.
    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view (only if we created it above; a caller-supplied
    # traversal spec means no view was created here)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose given property equals
    the requested value (or whose stringified reference id matches it).

    service_instance
        The Service Instance from which to obtain managed object references.
    object_type
        The type of content for which to obtain managed object references.
    property_value
        The name of the property for which to obtain the managed object reference.
    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # Also match against the stringified MOR id (quotes stripped).
        mor_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == mor_id:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Return a list of dicts, one per managed object, mapping each requested
    property name to its value plus an ``object`` key holding the reference.

    service_instance
        The Service Instance from which to obtain managed object references.
    object_type
        The type of content for which to obtain managed object references.
    property_list
        An optional list of object properties used to return even more filtered managed object reference results.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec
    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    def _retrieve():
        return get_content(service_instance, object_type,
                           property_list=property_list,
                           container_ref=container_ref,
                           traversal_spec=traversal_spec,
                           local_properties=local_properties)

    # Retry once on transient connection hiccups (stale HTTP keep-alive or
    # a broken pipe); anything else propagates.
    try:
        content = _retrieve()
    except BadStatusLine:
        content = _retrieve()
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = _retrieve()

    object_list = []
    for obj in content:
        props = {prop.name: prop.val for prop in obj.propSet}
        props['object'] = obj.obj
        object_list.append(props)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Return the requested properties of a managed object in a single
    optimized retrieval.

    mo_ref
        The managed object reference.
    properties
        List of properties of the managed object to retrieve.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First fetch the object's name (best effort) so error messages can
    # identify it.
    try:
        name_items = get_mors_with_properties(service_instance,
                                              type(mo_ref),
                                              container_ref=mo_ref,
                                              property_list=['name'],
                                              local_properties=True)
        mo_name = name_items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            "Properties of managed object '{0}' weren't "
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Return the ``name`` property of a managed object, or None when the
    object has no name.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Instantiate and return the virtual network device matching a type name.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e'.
    '''
    # Dispatch table: type name -> device class.
    factories = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in factories:
        return factories[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Return the type name string for a virtual network adapter device.

    adapter_object
        The adapter object from which to obtain the network adapter type.
    '''
    # Ordered checks: vmxnet2/vmxnet3 are tested before vmxnet, and e1000e
    # before e1000, so the most specific match wins — keep this order.
    checks = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for device_cls, label in checks:
        if isinstance(adapter_object, device_cls):
            return label
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Return the distributed virtual switches (DVSs) of a datacenter.

    dc_ref
        The parent datacenter reference.
    dvs_names
        The names of the DVSs to return. Default is None.
    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> its children (the DVSs).
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    switches = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            switches.append(entry['object'])
    return switches
def get_network_folder(dc_ref):
    '''
    Return the network folder of a datacenter.

    dc_ref
        The datacenter reference.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            "Network folder in datacenter '{0}' wasn't retrieved".format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Create a distributed virtual switch (DVS) in a datacenter and wait for
    the creation task to complete.

    dc_ref
        The parent datacenter reference.
    dvs_name
        The name of the DVS to create.
    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Fill in a minimal spec when none (or an incomplete one) was supplied.
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Reconfigure a distributed virtual switch with the given config spec and
    wait for the task to complete.

    dvs_ref
        The DVS reference.
    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enable or disable network I/O control (NIOC) on a DVS.

    dvs_ref
        The DVS reference.
    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on dvs \'%s\'',
              enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Return distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.
    portgroup_names
        The names of the dvss to return. Default is None.
    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    # The traversal depends on the parent type: datacenters reach portgroups
    # through the network folder; a DVS exposes them directly.
    if isinstance(parent_ref, vim.Datacenter):
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        # The uplink portgroup is identified by the SYSTEM/DVS.UPLINKPG tag
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs).

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Creation is asynchronous; block until the task finishes
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Reconfiguration is asynchronous; block until the task finishes
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Removal is asynchronous; block until the task finishes
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of objects from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    props = ['name'] if properties is None else properties
    # Return the 'name' property of every retrieved managed object
    return [entry['name'] for entry in
            get_mors_with_properties(service_instance, vim_object, props)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        return service_instance.content.licenseManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    if not assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The vSphere client displays this label value as the license description
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        added_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return added_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises:
        salt.exceptions.ArgumentValueError: if no entity_name is passed.
        salt.exceptions.VMwareApiError / VMwareRuntimeError: on API faults.
        salt.exceptions.VMwareObjectRetrievalError: if the vCenter assignment
            lookup is ambiguous or matches a different vCenter.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # vCenter case: the entity id is the vCenter's instance uuid and the
        # returned assignment's display name is verified against entity_name
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        # Any other entity (e.g. cluster, host): use its managed object id
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # A vCenter uuid lookup should resolve to exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        # Guard against querying a different vCenter than the one named
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')

    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter: the assignment entity id is the vCenter's instance uuid
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before translating, consistent with the other API error
            # handlers in this module
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    datacenter_names = list_objects(service_instance, vim.Datacenter)
    return datacenter_names
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        datacenter = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return datacenter
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The name of the cluster to be retrieved
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's host folder
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    matches = []
    for entry in get_mors_with_properties(si,
                                          vim.ClusterComputeResource,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if entry['name'] == cluster:
            matches.append(entry['object'])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True applies the spec incrementally instead of replacing
        # the whole configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    cluster_names = list_objects(service_instance, vim.ClusterComputeResource)
    return cluster_names
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    # Datastore clusters are modeled as vim.StoragePod objects
    pod_names = list_objects(service_instance, vim.StoragePod)
    return pod_names
def list_datastores(service_instance):
    '''
    Returns a list of datastore names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastore_names = list_objects(service_instance, vim.Datastore)
    return datastore_names
def list_datastores_full(service_instance):
    '''
    Returns a dict of datastores associated with a given service instance.
    Each entry maps a datastore name to its basic information:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )

    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Summary sizes are in bytes; convert to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against a zero-capacity (e.g. inaccessible) datastore to avoid
    # a ZeroDivisionError when computing the usage percentage
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0
    items['hosts'] = []

    for host in datastore_object.host:
        # host.key stringifies like "'vim.HostSystem:host-123'"; strip the
        # quotes and keep the moid after the colon
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)

    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.
    Returns None if no object with that name exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # doesn't accumulate on the vCenter/ESXi side
        container.DestroyView()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id.
    Returns None if no object with that moid exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # doesn't accumulate on the vCenter/ESXi side
        container.DestroyView()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # The search runs asynchronously on the server; one task is
            # started per datastore, scoped to '[<datastore>] <directory>'
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore that doesn't contain the searched directory is
            # simply skipped rather than treated as an error
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        # The backing-disk filter relies on the host's storage system, so it
        # is only valid when the reference is an ESXi host
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Merge the disk-derived names into the requested name filter
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        # Nothing matched the backing-disk filter; no point in querying
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    current_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", current_name,
              new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used for retrieval.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host, used in logging/errors. Retrieved if not provided.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return results[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo.
    '''
    try:
        infos = storage_system.RetrieveDiskPartitionInfo(
            devicePath=[device_path])
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # A single device path was queried, so return the single result
    log.trace('partition_info = %s', infos[0])
    return infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use a %s placeholder: log.trace interpolates %-style args lazily;
    # a '{0}' placeholder would never be substituted
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's storage system (vim.HostStorageSystem).
        Retrieved from ``host_ref`` if not provided.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    # Inspect the current partition layout of the backing disk
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a spec that turns the disk's remaining free space into a new
    # vmfs partition; returns its partition number and the partition spec
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    # Build the datastore creation spec around the newly computed partition
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system
    (vim.HostDatastoreSystem managed object reference).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem
    spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(si,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return entries[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore by detaching it through the datastore system of the
    first host it is attached to.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host can remove the datastore; use the first one
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            # ('parent' is needed below to check cluster membership)
            properties.append('parent')
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            # The cluster filter applies even when get_all_hosts is set:
            # hosts outside the named cluster are skipped first
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.

        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # Reading storageDeviceInfo can raise vim faults; map them to salt
    # exceptions
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    Returns an empty list when the host reports no scsi luns.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    # Reading storageDeviceInfo can raise vim faults; map them to salt
    # exceptions
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # scsi address -> lun key
    key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        si, host_ref, storage_system, hostname)
    # lun key -> vim.ScsiLun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Join the two maps: scsi address -> vim.ScsiLun object
    return {scsi_addr: lun_by_key[key]
            for scsi_addr, key in six.iteritems(key_by_scsi_addr)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # No filter and not retrieving everything: nothing can match
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    # A disk matches when either filter (canonical name OR scsi address)
    # selects it, or when get_all_disks is set
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Find the scsi disk matching the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk of an ESXi host.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # Retrieve the host's scsi luns through its storage system
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Find the scsi disk matching the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # Nothing to filter on; no disk groups can match
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (single) ssd cache disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    cache_name = disk_group.ssd.canonicalName
    if cache_name != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(cache_name, cache_disk_id))
    # Capacity disks must match exactly, regardless of order
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids,
                      expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first cache configuration entry is returned (single
        # host cache datastore supported)
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache
        will be configured on.

    swap_size_MiB
        The size in mebibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the vCenter task completes (raises on task failure)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    Delegates to ``list_objects`` with the ``vim.HostSystem`` type.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; when True all resource pools in the container are returned

    return
        List of vim.ResourcePool managed object references

    Raises VMwareObjectRetrievalError when no resource pool matches.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the *requested* names; the previous code formatted the
        # (always empty) result list, making the error message useless
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    Delegates to ``list_objects`` with the ``vim.ResourcePool`` type.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    Delegates to ``list_objects`` with the ``vim.Network`` type.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    Delegates to ``list_objects`` with the ``vim.VirtualMachine`` type.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    Delegates to ``list_objects`` with the ``vim.Folder`` type.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    Delegates to ``list_objects`` with the ``vim.DistributedVirtualSwitch``
    type.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    Delegates to ``list_objects`` with the ``vim.VirtualApp`` type.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    Delegates to ``list_objects`` with the
    ``vim.dvs.DistributedVirtualPortgroup`` type.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.

    Returns the task's result on success; on task failure the task's error
    is re-raised and translated into the corresponding salt exception.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Reading task.info can raise vim faults; map them to salt exceptions
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only log every sleep_seconds iterations
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep up to the next whole second since start_time so the polling
        # loop stays aligned to one iteration per second
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state
        # Re-raise the task's stored error so it can be translated into the
        # corresponding salt exception below
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set used by the salt vmware modules
        vm_properties = ['name',
                        'config.hardware.device',
                        'summary.storage.committed',
                        'summary.storage.uncommitted',
                        'summary.storage.unshared',
                        'layoutEx.file',
                        'config.guestFullName',
                        'config.guestId',
                        'guest.net',
                        'config.hardware.memoryMB',
                        'config.hardware.numCPU',
                        'config.files.vmPathName',
                        'summary.runtime.powerState',
                        'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # NOTE: the comma between the two literals is required; without it
        # implicit string concatenation produced '...with thesame name...'
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises ArgumentValueError when no base VM, placement folder or
    datacenter is supplied to derive the folder from.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # When cloning, place the new VM next to the base VM
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this fell through to the return statement and raised an
        # obscure NameError since folder_object was never assigned
        raise salt.exceptions.ArgumentValueError(
            'Unable to determine the folder: no base VM, placement folder '
            'or datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if placement is None:
        # Fall through to the explicit 'Placement is not defined' error
        # below instead of raising TypeError on the membership tests
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose the resource pool through their parent
            # compute resource; traverse to it explicitly
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Previously this message said 'host' and indexed
            # placement['host'], which may not exist in this branch
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts a size expressed in the given unit into kilobytes.

    unit
        Unit of ``size``, one of ``gb``, ``mb`` or ``kb`` (case insensitive).
        Note: to VMware a GB is the same as GiB = 1024MiB.

    size
        Number which represents the size.

    Returns a dict with the converted integer ``size`` and the ``unit`` 'KB'.
    '''
    normalized_unit = unit.lower()
    if normalized_unit == 'gb':
        # vCenter needs an integer (long) value
        kb_size = int(size * 1024 * 1024)
    elif normalized_unit == 'mb':
        kb_size = int(size * 1024)
    elif normalized_unit == 'kb':
        kb_size = int(size)
    else:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': kb_size, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers a virtual machine on or off and waits for the task to finish.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine, 'on' or 'off'
    '''
    # Reject unknown actions up front, before touching the API.
    if action not in ('on', 'off'):
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        if action == 'on':
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        else:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as err:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(err)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from a config spec and waits for the creation
    task to complete.

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional); only used if
        it is a vim.HostSystem instance

    return
        Virtual Machine managed object reference

    raises
        salt.exceptions.VMwareApiError / VMwareRuntimeError on API faults
    '''
    try:
        # Only pass the host when a real vim.HostSystem was supplied;
        # otherwise let vSphere pick placement from the resource pool.
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the task finishes; polls every 10 seconds at 'info' level.
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)

    raises
        salt.exceptions.VMwareApiError / VMwareRuntimeError on API faults,
        salt.exceptions.VMwareVmRegisterError if the vmx file is not found
    '''
    try:
        # asTemplate=False: register as a regular VM, not as a template.
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # A missing vmx file surfaces as a file-not-found during the task wait.
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Reconfigures a virtual machine with the given config spec and waits for
    the reconfigure task to finish.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return wait_for_task(reconfig_task, vm_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine and waits for the destroy task to finish.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(destroy_task, name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters (removes from the inventory) the virtual machine without
    deleting its files from the datastore.

    vm_ref
        Managed object reference of a virtual machine object

    raises
        salt.exceptions.VMwareApiError / VMwareRuntimeError on API faults
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed copy-paste from delete_vm: this unregisters, it does not destroy.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        # UnregisterVM is synchronous; there is no task to wait for.
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # log.exception added for consistency with the sibling VM helpers
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
list_objects
|
python
|
def list_objects(service_instance, vim_object, properties=None):
'''
Returns a simple list of objects from a given service instance.
service_instance
The Service Instance for which to obtain a list of objects.
object_type
The type of content for which to obtain information.
properties
An optional list of object properties used to return reference results.
If not provided, defaults to ``name``.
'''
if properties is None:
properties = ['name']
items = []
item_list = get_mors_with_properties(service_instance, vim_object, properties)
for item in item_list:
items.append(item['name'])
return items
|
Returns a simple list of objects from a given service instance.
service_instance
The Service Instance for which to obtain a list of objects.
object_type
The type of content for which to obtain information.
properties
An optional list of object properties used to return reference results.
If not provided, defaults to ``name``.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1426-L1447
|
[
"def get_mors_with_properties(service_instance, object_type, property_list=None,\n container_ref=None, traversal_spec=None,\n local_properties=False):\n '''\n Returns a list containing properties and managed object references for the managed object.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n\n object_type\n The type of content for which to obtain managed object references.\n\n property_list\n An optional list of object properties used to return even more filtered managed object reference results.\n\n container_ref\n An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,\n ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory\n rootFolder.\n\n traversal_spec\n An optional TraversalSpec to be used instead of the standard\n ``Traverse All`` spec\n\n local_properties\n Flag specigying whether the properties to be retrieved are local to the\n container. If that is the case, the traversal spec needs to be None.\n '''\n # Get all the content\n content_args = [service_instance, object_type]\n content_kwargs = {'property_list': property_list,\n 'container_ref': container_ref,\n 'traversal_spec': traversal_spec,\n 'local_properties': local_properties}\n try:\n content = get_content(*content_args, **content_kwargs)\n except BadStatusLine:\n content = get_content(*content_args, **content_kwargs)\n except IOError as exc:\n if exc.errno != errno.EPIPE:\n raise exc\n content = get_content(*content_args, **content_kwargs)\n\n object_list = []\n for obj in content:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n properties['object'] = obj.obj\n object_list.append(properties)\n log.trace('Retrieved %s objects', len(object_list))\n return object_list\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli commmand, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary with the command's stdout/stderr/retcode, or False
             if the esxcli binary is not on PATH
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting through a vCenter; -h names the ESXi host to act on.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    # SECURITY NOTE(review): the password, credstore path and cmd are
    # interpolated into a single shell command string; values containing
    # single quotes or shell metacharacters can break out of the quoting.
    # Prefer the credstore mechanism over inline passwords where possible.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Supports the 'userpass' mechanism (username/password) and the 'sspi'
    mechanism (Kerberos via a gssapi token built from principal/host/domain).
    On SSL verification failures it retries the connection with certificate
    verification disabled.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        # First attempt: default SSL context (certificate verification on).
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Older pyVmomi versions do not accept b64token/mechanism kwargs.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)

        try:
            # Retry with an unverified SSL context when the failure was a
            # certificate verification error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            # Last resort: TLSv1 context with verification disabled.
            if 'certificate verify failed' in six.text_type(exc):
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is cleaned up when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of customizing a clone

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the first matching managed object, or None if not found.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it is
        # not leaked on the vCenter/ESXi side (fixes a resource leak).
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Reuse pyVim's process-wide cached session when it points at this host.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and authenticate from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    # Reuse the authenticated session of the existing connection: extract the
    # session cookie from the private _stub and propagate it via both the
    # vmomi request context and the new stub's cookie.
    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only for logging. This field is optional.
    '''
    # NOTE(review): with the default '<unnamed>' this branch is dead code —
    # it only triggers when a caller explicitly passes a falsy name.
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a ServiceInstance that shares the managed object's SOAP stub
    # (and therefore its authenticated session).
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Disconnects from the vCenter server or ESXi host, translating pyVmomi
    faults into salt exceptions.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Returns True when connected to a vCenter Server and False when connected
    to an ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'HostAgent':
        return False
    if api_type == 'VirtualCenter':
        return True
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns the 'about' information of the vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no such DVS exists
    '''
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    view = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    for candidate in view.view:
        if candidate.name == dvs_name:
            return candidate
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Returns the base64-encoded token produced by the first gssapi step, or
    raises CommandExecutionError when no token could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    # NOTE(review): in_token is never reassigned, so ctx.step() is always
    # called with None; the loop effectively returns on the first produced
    # out_token or raises on the first step that yields nothing.
    in_token = None
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # Since in_token stays None, this raise is hit whenever step()
        # returns no token without establishing the context.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    Returns an empty dict when the connection is not a HostAgent (ESXi) or
    when no HostSystem is visible.

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only direct ESXi connections (HostAgent) expose a single host to grain.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the (single) ESXi host backing this connection.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grain is in MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize / 1024 / 1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Virtual NICs (vmk interfaces): collect IPv4/IPv6 addresses + MACs.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # Join host and domain; omit the dot when no domain is configured.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NICs: MAC addresses only.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (service content) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties used to return even more
        filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.

    Raises ``VMwareApiError`` for API faults (including insufficient
    permissions) and ``VMwareRuntimeError`` for vmodl runtime faults.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the
    # filter is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view (only needed when we created one above)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference whose ``property_name``
    equals ``property_value`` (or whose stringified moid matches it).
    Returns None when nothing matches.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match against.

    property_name
        The object property to compare. Defaults to ``name``.

    container_ref
        An optional managed object to search under (Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem). Defaults to the
        inventory rootFolder.
    '''
    # Fetch all candidates carrying the property of interest, then scan
    candidates = get_mors_with_properties(service_instance, object_type, property_list=[property_name], container_ref=container_ref)
    for candidate in candidates:
        mo_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if property_value in (candidate[property_name], mo_id):
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list of dicts, one per managed object, each mapping the
    requested property names to their values plus an ``object`` key
    holding the managed object reference itself.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more
        filtered managed object reference results.

    container_ref
        An optional managed object to search under (Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem). Defaults to the
        inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''
    kwargs = {'property_list': property_list,
              'container_ref': container_ref,
              'traversal_spec': traversal_spec,
              'local_properties': local_properties}
    # A dropped HTTP connection (BadStatusLine) or a broken pipe is retried
    # exactly once; any other IOError propagates.
    try:
        content = get_content(service_instance, object_type, **kwargs)
    except BadStatusLine:
        content = get_content(service_instance, object_type, **kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(service_instance, object_type, **kwargs)

    object_list = []
    for obj in content:
        entry = {prop.name: prop.val for prop in obj.propSet}
        entry['object'] = obj.obj
        object_list.append(entry)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises ``VMwareApiError`` if the properties could not be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # Look up the object's name first so any failure below can be reported
    # against a human-readable identifier.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        # Not every managed object type exposes a 'name' property
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the ``name`` property of a managed object, or None when the
    object has no such property.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Returns a new network adapter device object for the given type name.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or 'e1000e'.

    Raises ValueError for any other type name.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the type name ('vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or
    'e1000e') of a network adapter device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ValueError when the object matches none of the known types.
    '''
    # Order matters: the more specific classes must be checked before
    # their relatives (the original code tests Vmxnet2/Vmxnet3 before
    # Vmxnet and E1000e before E1000).
    type_labels = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for adapter_class, label in type_labels:
        if isinstance(adapter_object, adapter_class):
            return label
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # DVSs live under the datacenter's network folder, so traverse
    # Datacenter -> networkFolder -> childEntity.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises ``VMwareObjectRetrievalError`` when the folder could not be
    retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    select_netw_folder = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.Datacenter,
        path='networkFolder',
        skip=False)
    folders = get_mors_with_properties(
        get_service_instance_from_managed_object(dc_ref),
        vim.Folder,
        container_ref=dc_ref,
        property_list=['name'],
        traversal_spec=select_netw_folder)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits
    for the creation task to complete. Note: the new DVS reference is NOT
    returned; this function returns None.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case an empty spec named ``dvs_name``
        is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and waits
    for the reconfiguration task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (network I/O control) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).

    parent_ref
        The parent object reference. Can be either a datacenter or a
        distributed virtual switch.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups under the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Portgroups hang off the datacenter's network folder
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # Parent is a distributed virtual switch: portgroups are direct
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference

    Raises ``VMwareObjectRetrievalError`` if the DVS has no uplink
    portgroup.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup carries the SYSTEM/DVS.UPLINKPG tag; return the
    # first portgroup that has it.
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            return entry['object']
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message ('portgrouo' -> 'portgroup')
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches under a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    # Networks live under the datacenter's network folder
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(parent_ref)
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license
        manager.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises ``VMwareObjectRetrievalError`` when the assignment manager is
    not available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license and returns the resulting license object.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label the vSphere client displays
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ``ArgumentValueError`` when no entity_name is passed, and
    ``VMwareObjectRetrievalError`` when the vCenter query returns
    unexpected results.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        # For the vCenter itself, the license entity id is its instance UUID
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid) query must return exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        # Guard against getting license info from a different vCenter
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the resulting license
    object.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter: the license entity id is the vCenter's instance UUID
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before translating the fault, consistently with all
            # other API wrappers in this module (was previously missing)
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    datacenters = list_objects(service_instance, vim.Datacenter)
    return datacenters
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns datacenter managed objects from a vCenter, optionally
    filtered by name.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns the vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name

    Raises ``VMwareObjectRetrievalError`` when no such datacenter exists.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder and returns the
    new datacenter object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The name of the cluster to be retrieved

    Raises ``VMwareObjectRetrievalError`` when the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's host folder, so traverse
    # Datacenter -> hostFolder -> childEntity.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx). Required.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter. Blocks until the reconfiguration
    task completes.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).

    Raises VMwareApiError on permission or other vim faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing configuration
        # instead of replacing it wholesale
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Wait for the async vCenter task so failures surface to the caller
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore clusters associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    # Datastore clusters are modeled as vim.StoragePod objects
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.
    The list contains basic information about the datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # Map each datastore name to its detailed summary dictionary
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # summary sizes are in bytes; convert to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against zero-capacity (e.g. inaccessible) datastores, which
    # would otherwise raise ZeroDivisionError when computing the percentage
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key reprs as "'vim.HostSystem:host-123'"; extract the moid part
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the managed object reference, or None if no object with the
    given name was found.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # is not leaked on the vCenter/ESXi side
        container.Destroy()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object

    Returns the managed object reference, or None if no object with the
    given moid was found.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # is not leaked on the vCenter/ESXi side
        container.Destroy()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects

    Raises VMwareApiError on permission or other vim faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Datastore paths use the '[datastore] path' notation
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore without the directory simply contributes nothing
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.

    Raises ArgumentValueError for unsupported reference types.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # Build a new list instead of extending the caller's list in
            # place; mutating the 'datastore_names' argument would leak the
            # disk-derived names back into the caller's data
            datastore_names = datastore_names + disk_datastores
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises VMwareApiError on permission or other vim faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used to retrieve the storage system.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host, used only for log/error messages. Retrieved from
        the host reference if not provided.

    Raises VMwareObjectRetrievalError if the storage system could not be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    # 'systemFile' is only requested as a cheap property to drive the
    # retrieval; the object reference is what we actually need
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The vim.HostStorageSystem to query.

    device_path
        Path of the device whose partition info is retrieved.

    Raises VMwareApiError on permission or other vim faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    try:
        # The API takes a list of device paths; we query exactly one
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem used to compute the partition info.

    device_path
        Path of the device on which the partition is added.

    partition_info
        The current vim.HostDiskPartitionInfo of the device.

    Raises VMwareObjectNotFoundError if the disk has no free partition,
    VMwareNotFoundError if the newly computed partition cannot be identified,
    VMwareApiError/VMwareRuntimeError on API faults.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed: the original used '{0}' with %-style lazy logging, so the value
    # was never interpolated into the message
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id and returns the new
    vim.Datastore reference.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved from the host if not
        provided. Default is None.

    Raises VMwareApiError on permission or other vim faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a spec that adds a vmfs partition covering the remaining free
    # space on the disk
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises VMwareObjectRetrievalError if the datastore system could not be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes (unmounts) a datastore via the datastore system of one of its
    attached hosts.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises VMwareApiError if the datastore has no attached hosts or on
    permission/API faults, VMwareRuntimeError on vmodl runtime faults.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host's datastore system can perform the removal;
    # use the first one
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # 'parent' is needed below to verify cluster membership; also
            # validates that the cluster lookup makes sense (only within a
            # datacenter)
            properties.append('parent')
    else:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for host in hosts:
        # A host makes the cut if it belongs to the requested cluster (when
        # one was given) and either all hosts were requested or its name was
        if cluster_name:
            parent = host['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host['name'] in host_names:
            filtered_hosts.append(host['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.

        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises VMwareObjectRetrievalError when the storage device info, multipath
    info or luns cannot be retrieved; VMwareApiError/VMwareRuntimeError on
    API faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    Returns an empty list when the host reports no scsi luns. Raises
    VMwareObjectRetrievalError when the storage system or its device info
    cannot be retrieved; VMwareApiError/VMwareRuntimeError on API faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref,
                                                                name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # scsi address -> lun key
    lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        service_instance, host_ref, storage_system, hostname)
    # lun key -> vim.ScsiLun object
    lun_by_key = {lun.key: lun for lun in
                  get_all_luns(host_ref, storage_system, hostname)}
    # Join the two maps: scsi address -> vim.ScsiLun object
    return {scsi_addr: lun_by_key[lun_key] for scsi_addr, lun_key in
            six.iteritems(lun_key_by_scsi_addr)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Without any filter (and not get_all_disks) there is nothing to do
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk (vim.HostDiskPartitionInfo).

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError when the devices or the requested disk
    cannot be found on the host.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the requested disk by its canonical name among the scsi luns
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError when the devices or the requested disk
    cannot be found; VMwareApiError/VMwareRuntimeError on API faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the requested disk by its canonical name among the scsi luns
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.

    Raises VMwareObjectRetrievalError when the host's vsan config or storage
    info cannot be retrieved; VMwareApiError/VMwareRuntimeError on API faults.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # Without cache disk ids (and not get_all_disk_groups) there is
        # nothing to match
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (single) cache disk's canonical name
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, otherwise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # No manager supplied: traverse from the host to its cache
        # configuration manager and fetch the cache info in a single query.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first cache configuration entry is returned; host caches
        # on multiple datastores are not supported here.
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method

    Returns True on success; raises VMwareObjectRetrievalError when the host
    has no cache configuration manager, or VMwareApiError/VMwareRuntimeError
    on pyVmomi faults.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        # Look the cache configuration manager up from the host itself.
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the async reconfiguration completes (raises on task error).
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    host_refs = list_objects(service_instance, vim.HostSystem)
    return host_refs
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; when True every resource pool in the container is returned

    return
        List of resource pool managed object references

    Raises VMwareObjectRetrievalError when nothing matched.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Restrict the search to the datacenter when one was given; otherwise
    # search the whole inventory starting at the root folder.
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = [pool['object'] for pool in resource_pools
                      if get_all_resource_pools or
                      (pool['name'] in resource_pool_names)]
    if not selected_pools:
        # Report the names that were requested -- the previous code
        # interpolated `selected_pools` here, which is always [] at this
        # point, producing a useless error message.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    pool_refs = list_objects(service_instance, vim.ResourcePool)
    return pool_refs
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    network_refs = list_objects(service_instance, vim.Network)
    return network_refs
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vm_refs = list_objects(service_instance, vim.VirtualMachine)
    return vm_refs
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    folder_refs = list_objects(service_instance, vim.Folder)
    return folder_refs
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    dvs_refs = list_objects(service_instance, vim.DistributedVirtualSwitch)
    return dvs_refs
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vapp_refs = list_objects(service_instance, vim.VirtualApp)
    return vapp_refs
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    portgroup_refs = list_objects(service_instance,
                                  vim.dvs.DistributedVirtualPortgroup)
    return portgroup_refs
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.

    Returns the task result on success; on task failure re-raises the task's
    error translated into the salt exception hierarchy.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Initial read of the task state; translate pyVmomi faults into salt
    # exceptions so callers only need to handle the salt hierarchy.
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # `sleep_seconds` only throttles how often the wait message is
        # logged; the poll interval itself is roughly one second.
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary relative to start_time.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state: raise the stored fault so it can be
        # mapped onto the matching salt exception below.
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first localized fault message when present for
            # a more descriptive error.
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError when no VM matches and
    VMwareMultipleObjectsError when more than one does.
    '''
    # Narrow the search to the datacenter unless an explicit container was given.
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property selection covering identity, hardware, storage
        # and guest information.
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # The previous adjacent string literals rendered as
        # '...found with thesame name...'; fixed to include the space.
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the same name, '
            'please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises VMwareObjectRetrievalError when the folder cannot be determined
    and VMwareMultipleObjectsError when the placement folder is ambiguous.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Cloning: place the new VM in the parent folder of the base VM.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this fell through to the return below and raised
        # UnboundLocalError; fail with an explicit error instead.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The folder could not be determined: no base VM, folder '
            'placement or datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if placement is None:
        # The default of None used to crash with TypeError on the membership
        # tests below; fall through to the explicit error instead.
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose resourcePool directly; clustered hosts
            # don't, so traverse host -> parent cluster -> resourcePool.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Previously this message referenced placement['host'], which is
            # not set in this branch; report the resource pool instead.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a long integer.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    # vCenter needs a long value, hence the int() conversion below.
    kb_multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    multiplier = kb_multipliers.get(unit.lower())
    if multiplier is None:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': int(size * multiplier), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine; either ``on`` or ``off``

    Returns the virtual machine object once the power task has completed.
    Raises ArgumentValueError for an unknown action, VMwarePowerOnError when
    the task fails because a file is missing, and
    VMwareApiError/VMwareRuntimeError on pyVmomi faults.
    '''
    if action == 'on':
        try:
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    elif action == 'off':
        try:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        # Block until the power task finishes; a missing file (e.g. vmx or
        # vmdk) surfaces as VMwareFileNotFoundError and is re-raised as a
        # power operation error.
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference

    Raises VMwareApiError/VMwareRuntimeError on pyVmomi faults.
    '''
    try:
        # Only pass the host when a valid vim.HostSystem was supplied;
        # otherwise let the resource pool decide the placement.
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Creation can take a while; log progress at info level every 10s.
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)

    Raises VMwareVmRegisterError when the vmx file is missing and
    VMwareApiError/VMwareRuntimeError on pyVmomi faults.
    '''
    try:
        # The VM is always registered as a regular VM (asTemplate=False),
        # with the host pinned only when one was supplied.
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # A missing vmx file surfaces as a file-not-found task error.
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update

    Returns the result of the reconfiguration task. Raises
    VMwareApiError/VMwareRuntimeError on pyVmomi faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine

    vm_ref
        Managed object reference of a virtual machine object

    Raises VMwareApiError/VMwareRuntimeError on pyVmomi faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy is asynchronous; block until the task completes.
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine, i.e. removes it from the inventory
    without deleting its files from the datastore.

    vm_ref
        Managed object reference of a virtual machine object

    Raises VMwareApiError/VMwareRuntimeError on pyVmomi faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    # The old docstring/log said "Destroying" -- a copy-paste from delete_vm;
    # UnregisterVM only removes the VM from inventory.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before re-raising, consistent with every sibling helper.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_license_manager
|
python
|
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises VMwareApiError/VMwareRuntimeError on pyVmomi faults.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
|
Returns the license manager.
service_instance
The Service Instance Object from which to obtain the license manager.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1450-L1472
| null |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param protocol: Connection protocol, defaults to https
    :param port: TCP port, defaults to 443
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary (the full return of cmd.run_all), or False when the
             esxcli binary is not available
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting through a vCenter (-s) and targeting an ESXi host (-h).
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    # quiet loglevel keeps the password out of the logs.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Supports the ``userpass`` (username/password) and ``sspi`` (Kerberos
    token) login mechanisms. On SSL certificate verification failures the
    connection is retried with verification disabled.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        # First attempt: default SSL verification.
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # NOTE(review): exc.message is a Python 2-only attribute; on
        # Python 3 this handler itself would raise AttributeError -- confirm.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)

        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # Certificate verification failed: retry with an
                # unverified SSL context.
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: explicit TLSv1 context with verification off.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the connection is cleanly closed at interpreter exit.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Look up a VMware guest customization spec by name for use when
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec to retrieve
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Return the first managed object of the given type whose name matches,
    or None if no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    # Recursive (True) container view rooted at the inventory root folder
    view = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((entry for entry in view.view if entry.name == obj_name),
                None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the
    service instance object.

    A cached service instance (pyVmomi ``GetSi()``) is reused when it still
    points at the same host and we are not running as a proxy minion;
    otherwise a fresh connection is established and its liveness verified.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
                (hasattr(stub, 'host') and
                 stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Session expired server-side: drop it and build a new connection
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a SOAP stub that points to a different path,
    created from an existing connection (the session cookie of the
    existing stub is reused so no re-authentication is needed).

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    # stub.host is 'hostname:port'; only the hostname is needed
    hostname = stub.host.split(':')[0]
    # Extract the bare session cookie value from the Set-Cookie style string
    session_cookie = stub.cookie.split('"')[1]
    # Propagate the vCenter session so the new stub is already authenticated
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object by building a
    vim.ServiceInstance that shares the managed object's SOAP stub.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only for logging. This field is
        optional; if a falsy value is passed the managed object's own
        ``name`` attribute is used instead.
    '''
    if not name:
        # Only triggered when the caller explicitly passes a falsy name;
        # the default '<unnamed>' is truthy.
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host,
    translating pyVmomi faults into salt exceptions.

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises:
        salt.exceptions.VMwareApiError: on permission or other vim faults.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host, based on the endpoint's
    reported ``about.apiType``.

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises:
        salt.exceptions.VMwareApiError: on vim faults or an unrecognized
            api type.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host, i.e. the
    ``content.about`` object (vim.AboutInfo) of the endpoint.

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises:
        salt.exceptions.VMwareApiError: on permission or other vim faults.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object, or None if
    no switch with the requested name is known.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object
    '''
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    view = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    return next((switch for switch in view.view if switch.name == dvs_name),
                None)
def _get_pnics(host_reference):
    '''
    Helper that returns the physical NICs configured on a host
    (``config.network.pnic``).
    '''
    network_config = host_reference.config.network
    return network_config.pnic
def _get_vnics(host_reference):
    '''
    Helper that returns the virtual NICs configured on a host
    (``config.network.vnic``).
    '''
    network_config = host_reference.config.network
    return network_config.vnic
def _get_vnic_manager(host_reference):
    '''
    Helper that returns the host's virtual NIC manager
    (``configManager.virtualNicManager``).
    '''
    config_manager = host_reference.configManager
    return config_manager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
    '''
    Return the portgroup on the dvs whose name matches, or None.

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object
    '''
    matches = (pg for pg in dvs.portgroup if pg.name == portgroup_name)
    return next(matches, None)
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return the portgroup on the dvs whose name matches, or None.

    NOTE(review): this is currently identical to _get_dvs_portgroup and does
    not restrict the search to uplink portgroups -- confirm whether that is
    intended.

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object
    '''
    matches = (pg for pg in dvs.portgroup if pg.name == portgroup_name)
    return next(matches, None)
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection, base64-encoded for use as
    the ``b64token`` argument of SmartConnect.

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Raises:
        ImportError: if the gssapi library is not available.
        salt.exceptions.CommandExecutionError: if no token could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # Each step of the GSSAPI handshake may produce an output token;
        # the first one produced is returned base64-encoded.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # in_token is never populated in this loop, so a step that yields
        # no token and no established context means the server did not reply
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance
    is a HostAgent type (i.e. a direct ESXi host connection); otherwise an
    empty dict is returned.

    The data is harvested from the first HostSystem found in a recursive
    container view: system/BIOS/CPU info, OS product info, memory, network
    interfaces (vnics and pnics), hostname/domain/fqdn and timezone.

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grains report MiB
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (retrieved content) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    inventory_content = service_instance.RetrieveContent()
    return inventory_content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises:
        salt.exceptions.VMwareApiError: on permission or other vim faults.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.

    Raises:
        salt.exceptions.VMwareApiError: on permission or other vim faults.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view created above (only if we created it here)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property
    value, or None if nothing matches.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # Accept a match on either the property value or the stringified
        # managed object reference id (quotes stripped)
        mor_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == mor_id:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    Each list entry is a dict mapping property names to their values, plus
    an ``object`` key holding the managed object reference itself.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content; retry once on transient connection failures
    # (stale keep-alive sockets surface as BadStatusLine or EPIPE)
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way (a local-properties query scoped to the object itself).

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises:
        salt.exceptions.VMwareApiError: if the properties could not be
            retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    try:
        # First fetch the object's name purely for friendlier log/error text
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        # Not all managed object types expose a 'name' property
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.

    If the name wasn't found, it returns None.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device of the requested type.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or 'e1000e'.

    Raises ValueError for any other value.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type string for a device instance.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ValueError if the object matches none of the known types.
    '''
    # The original check order is preserved: the specific vmxnet variants
    # are tested before the plain vmxnet type, and e1000e before e1000.
    type_names = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for adapter_class, type_name in type_names:
        if isinstance(adapter_object, adapter_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    If neither ``dvs_names`` nor ``get_all_dvss`` is given, an empty list
    is returned.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    properties = ['name']
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualSwitch,
                                      container_ref=dc_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_dvss or (dvs_names and i['name'] in dvs_names)]
    return items
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises:
        salt.exceptions.VMwareObjectRetrievalError: if the folder could not
            be found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits
    for the creation task to complete. Returns None.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the
        name is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and waits
    for the reconfigure task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (network I/O control) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).

    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises:
        salt.exceptions.ArgumentValueError: if parent_ref is neither a
            datacenter nor a distributed virtual switch.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    if isinstance(parent_ref, vim.Datacenter):
        # Datacenter parent: traverse networkFolder -> childEntity
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs). The uplink portgroup is identified by the
    'SYSTEM/DVS.UPLINKPG' tag on the portgroup.

    dvs_ref
        The dvs reference

    Raises:
        salt.exceptions.VMwareObjectRetrievalError: if no uplink portgroup
            was found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates (reconfigures) a distributed virtual portgroup and waits for
    the task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in log message ('portgrouo' -> 'portgroup')
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and blocks until the destroy
    task completes.
    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    # Translate pyVmomi faults into salt exceptions; the most specific
    # fault is handled first so it is not shadowed by a broader handler.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the task as finished (raises on failure)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.
    parent_ref
        The parent object reference. A datacenter object.
    network_names
        The name of the standard switch networks. Default is None.
    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    # Only datacenters are supported as the traversal starting point
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks live under the datacenter's networkFolder; traverse
    # datacenter -> networkFolder -> childEntity to reach them
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    # Filter by name unless the caller asked for everything
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.
    service_instance
        The Service Instance for which to obtain a list of objects.
    vim_object
        The type of content for which to obtain information.
    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    props = ['name'] if properties is None else properties
    # Each entry returned by get_mors_with_properties is a dict of the
    # requested properties; only the 'name' value is reported here.
    return [entry['name'] for entry in
            get_mors_with_properties(service_instance, vim_object, props)]
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.
    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    # Translate pyVmomi faults into salt exceptions; the most specific
    # fault is handled first so it is not shadowed by a broader handler.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # The attribute can legitimately be unset; treat that as a hard error
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.
    service_instance
        The Service Instance Object from which to obtain the licenses.
    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    # Translate pyVmomi faults into salt exceptions; the most specific
    # fault is handled first so it is not shadowed by a broader handler.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.
    service_instance
        The Service Instance Object.
    key
        The key of the license to add.
    description
        The description of the license to add.
    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label the vSphere client displays
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    # Translate pyVmomi faults into salt exceptions; the most specific
    # fault is handled first so it is not shadowed by a broader handler.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.
    service_instance
        The Service Instance Object from which to obtain the licenses.
    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.
    entity_name
        Entity name used in logging.
        Default is None.
    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # vCenter itself: identified by its instance UUID instead of a moid
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid) query should yield exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    # Guard against talking to a different vCenter than the caller intended
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.
    service_instance
        The Service Instance Object from which to obtain the licenses.
    license_key
        The key of the license to add.
    license_name
        The description of the license to add.
    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.
    entity_name
        Entity name used in logging.
        Default is None.
    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before re-raising, consistent with the other fault
            # handlers in this module (was previously missing here)
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.
    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    datacenter_names = list_objects(service_instance, vim.Datacenter)
    return datacenter_names
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns datacenter objects in a vCenter, filtered by name unless
    all datacenters were requested.
    service_instance
        The Service Instance Object from which to obtain cluster.
    datacenter_names
        List of datacenter names to filter by. Default value is None.
    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns the vim.Datacenter managed object with the given name.
    Raises VMwareObjectRetrievalError when no such datacenter exists.
    service_instance
        The Service Instance Object from which to obtain datacenter.
    datacenter_name
        The datacenter name
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if matches:
        return matches[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder and returns the new
    datacenter object.
    .. versionadded:: 2017.7.0
    service_instance
        The Service Instance Object
    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    # Translate pyVmomi faults into salt exceptions; the most specific
    # fault is handled first so it is not shadowed by a broader handler.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.
    Raises VMwareObjectRetrievalError if the cluster is not found.
    dc_ref
        The datacenter reference
    cluster
        The cluster to be retrieved
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's hostFolder; traverse
    # datacenter -> hostFolder -> childEntity to reach them
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.
    dc_ref
        The parent datacenter reference.
    cluster_name
        The cluster name.
    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        # CreateClusterEx is synchronous here; no task to wait on
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    # Translate pyVmomi faults into salt exceptions; the most specific
    # fault is handled first so it is not shadowed by a broader handler.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter and blocks until the reconfiguration
    task completes.
    cluster_ref
        The cluster reference.
    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    # Translate pyVmomi faults into salt exceptions; the most specific
    # fault is handled first so it is not shadowed by a broader handler.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    cluster_names = list_objects(service_instance, vim.ClusterComputeResource)
    return cluster_names
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster names associated with a given
    service instance.
    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    pod_names = list_objects(service_instance, vim.StoragePod)
    return pod_names
def list_datastores(service_instance):
    '''
    Returns a list of datastore names associated with a given service
    instance.
    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastore_names = list_objects(service_instance, vim.Datastore)
    return datastore_names
def list_datastores_full(service_instance):
    '''
    Returns a mapping of datastore name to its basic information:
    name, type, url, capacity, free, used, usage, hosts
    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # One detail lookup per datastore name reported by the vCenter
    return dict(
        (ds_name, list_datastore_full(service_instance, ds_name))
        for ds_name in list_objects(service_instance, vim.Datastore))
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts
    Sizes are reported in MiB; usage is a percentage.
    service_instance
        The Service Instance Object from which to obtain datastores.
    datastore
        Name of the datastore.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    # Quotes are stripped so values embed cleanly in downstream output
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Convert bytes to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # NOTE(review): divides by capacity — a zero-capacity (e.g. unmounted)
    # datastore would raise ZeroDivisionError here; confirm upstream
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies as 'vim.HostSystem:<moid>'; keep only the moid
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get a reference to the first object of the specified type whose name
    matches, or None when no such object exists.
    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)
    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)
    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next(
        (obj for obj in container.view if obj.name == obj_name), None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get a reference to the first object of the specified type whose managed
    object id matches, or None when no such object exists.
    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)
    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)
    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next(
        (obj for obj in container.view if obj._moId == obj_moid), None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.
    service_instance
        The Service Instance Object from which to obtain datastores.
    directory
        The name of the directory where we would like to search
    datastores
        Name of the datastores
    container_object
        The base object for searches
    browser_spec
        BrowserSpec object which defines the search criteria
    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Search path uses the '[datastore] directory' datastore-path form
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        # Translate pyVmomi faults into salt exceptions; the most specific
        # fault is handled first so it is not shadowed by a broader handler.
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore without the directory is simply skipped
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses
    service_instance
        The Service Instance Object from which to obtain datastores.
    reference
        The VMware object from which the datastores are visible.
    datastore_names
        The list of datastore names to be retrieved. Default value is None.
    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None
    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        # Disk-based filtering needs the host's storage system, so it is
        # only valid when the reference is a host
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Names resolved from the disk filter extend the explicit name filter
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    # The traversal spec depends on the type of the starting reference,
    # since datastores are reachable along different property paths
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore
    datastore_ref
        vim.Datastore reference to the datastore object to be changed
    new_datastore_name
        New datastore name
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    # Translate pyVmomi faults into salt exceptions; the most specific
    # fault is handled first so it is not shadowed by a broader handler.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).
    Raises VMwareObjectRetrievalError if it cannot be retrieved.
    service_instance
        The Service Instance Object.
    host_ref
        Reference to the ESXi host.
    hostname
        Name of the host, used for logging. Retrieved if not provided.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # The storage system hangs off the host's configManager
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition informations for a device path, of type
    vim.HostDiskPartitionInfo
    storage_system
        The host's vim.HostStorageSystem.
    device_path
        Path of the device to inspect.
    '''
    try:
        # The API accepts a list of paths; we query a single one
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    # Translate pyVmomi faults into salt exceptions; the most specific
    # fault is handled first so it is not shadowed by a broader handler.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)
    storage_system
        The host's vim.HostStorageSystem.
    device_path
        Path of the device the partition is added to.
    partition_info
        The device's current vim.HostDiskPartitionInfo.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed log placeholder: '{0}' is str.format syntax and is never
    # interpolated by logging's %-style lazy formatting
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id and returns the new
    vim.Datastore reference.
    host_ref
        vim.HostSystem object referencing a host to create the datastore on
    datastore_name
        Name of the datastore
    disk_ref
        vim.HostScsiDislk on which the datastore is created
    vmfs_major_version
        VMFS major version to use
    storage_system
        The host's vim.HostStorageSystem. Retrieved if not provided.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    # Current partition layout of the backing disk
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Let the host compute the spec for a vmfs partition taking the
    # remaining free space on the disk
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    # Translate pyVmomi faults into salt exceptions; the most specific
    # fault is handled first so it is not shadowed by a broader handler.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).
    Raises VMwareObjectRetrievalError if it cannot be retrieved.
    host_ref
        Reference to the ESXi host
    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # The datastore system hangs off the host's configManager
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore via the datastore system of one of its attached
    hosts.
    service_instance
        The Service Instance Object containing the datastore
    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    # Removal is performed through a host's datastore system, so at least
    # one attached host is required
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    # Translate pyVmomi faults into salt exceptions; the most specific
    # fault is handled first so it is not shadowed by a broader handler.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    props = ['name']
    if datacenter_name:
        container = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # 'parent' is needed to verify cluster membership; cluster
            # existence only makes sense when the datacenter is specified
            props.append('parent')
    else:
        # Assume the root folder is the starting point
        container = get_root_folder(service_instance)
    found = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=container,
                                     property_list=props)
    log.trace('Retrieved hosts: %s', [entry['name'] for entry in found])
    selected = []
    for entry in found:
        # A host is kept if it belongs to the requested cluster (when one is
        # given) and either all hosts were requested or its name matches
        if cluster_name:
            parent = entry['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or entry['name'] in host_names:
            selected.append(entry['object'])
    return selected
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Each missing layer of the storage device info is reported separately
    # so the caller gets a precise error
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        # NOTE: a lun reachable over several paths contributes one entry per
        # path; paths sharing a scsi address overwrite previous entries
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        # Storage system wasn't passed in; retrieve it from the host
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    # A host with no scsi luns is not an error; return an empty list
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # First build the scsi_address -> lun key map, then resolve each key
    # to the actual vim.ScsiLun object
    key_by_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                   storage_system, hostname)
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    result = {}
    for scsi_addr, lun_key in six.iteritems(key_by_addr):
        result[scsi_addr] = lun_by_key[lun_key]
    return result
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Nothing to filter by, so nothing can match
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    keys_from_addresses = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        addr_to_key = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                       storage_system,
                                                       hostname)
        keys_from_addresses = [lun_key for scsi_addr, lun_key
                               in six.iteritems(addr_to_key)
                               if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s',
                  keys_from_addresses)
    matched = []
    for lun in get_all_luns(host_ref, storage_system):
        if not isinstance(lun, vim.HostScsiDisk):
            continue
        # Keep the disk when everything was requested, or when it matches
        # either by canonical name or by a key derived from a scsi address
        if (get_all_disks or
                (disk_ids and lun.canonicalName in disk_ids) or
                lun.key in keys_from_addresses):
            matched.append(lun)
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in matched])
    return matched
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Find the requested disk among all of the host's scsi luns
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # The partition info is looked up by device path, not by canonical name
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # Traverse from the host to its configManager.storageSystem to retrieve
    # the scsi luns of the host
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Find the requested disk among all of the host's scsi luns
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # Nothing to filter by, so nothing can match
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    # A host without disk mappings simply has no disk groups; not an error
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by the canonical name of its (single)
    # cache disk (the 'ssd' of the mapping)
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Validates that a disk group is composed of the expected cache disk and
    capacity disks; raises an ArgumentValueError when the check fails.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # Capacity disks are compared as sorted lists so ordering is irrelevant
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # The cache manager wasn't passed in: traverse from the host to its
        # configManager.cacheConfigurationManager to retrieve the cache info
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first cache configuration is considered (a single
        # datastore-backed cache is supported; see TODO above)
        return results[0]['cacheConfigurationInfo'][0]
    else:
        # The cache manager was provided: read the property directly
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore opject representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    # Build the spec and run the (asynchronous) configuration task
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    hosts = list_objects(service_instance, vim.HostSystem)
    return hosts
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean

    return
        Resourcepool managed object reference
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search under the datacenter when one is given, otherwise under the
    # inventory root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = [pool['object'] for pool in resource_pools
                      if get_all_resource_pools or
                      pool['name'] in resource_pool_names]
    if not selected_pools:
        # Bug fix: the message used to format the (always empty) result list
        # instead of the names that were actually requested
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    pools = list_objects(service_instance, vim.ResourcePool)
    return pools
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    networks = list_objects(service_instance, vim.Network)
    return networks
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vms = list_objects(service_instance, vim.VirtualMachine)
    return vms
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    folders = list_objects(service_instance, vim.Folder)
    return folders
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    switches = list_objects(service_instance, vim.DistributedVirtualSwitch)
    return switches
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vapps = list_objects(service_instance, vim.VirtualApp)
    return vapps
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    portgroups = list_objects(service_instance,
                              vim.dvs.DistributedVirtualPortgroup)
    return portgroups
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll until the task leaves the 'running'/'queued' states
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only log every sleep_seconds iterations to avoid flooding the log
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary relative to start_time,
        # so iterations stay aligned to ~1 s intervals
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault so it can be
        # mapped to the appropriate salt exception type
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first fault message, if the fault carries one
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.
    '''
    if datacenter and not parent_ref:
        # Call the module-level helper directly, consistent with the rest of
        # this module (the salt.utils.vmware self-import is redundant here)
        parent_ref = get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = get_mors_with_properties(service_instance,
                                       vim.VirtualMachine,
                                       vm_properties,
                                       container_ref=parent_ref,
                                       traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Bug fix: the message used to read "with thesame name" due to
        # implicit string concatenation without a separating space
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the same name, '
            'please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns the vim.Folder object a virtual machine should be placed under.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # When cloning, reuse the folder of the base virtual machine
        vm_object = get_vm_by_property(service_instance, base_vm_name,
                                       vm_properties=['name'])
        vm_props = get_properties_of_managed_object(vm_object,
                                                    properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = get_folders(service_instance, [placement['folder']],
                                     datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = get_datacenter(service_instance, datacenter)
        dc_props = get_properties_of_managed_object(datacenter_object,
                                                    properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'The datacenter vm folder object is not defined')
    else:
        # Previously this fell through and raised an opaque
        # UnboundLocalError on the return statement; fail explicitly
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The destination folder could not be determined')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter,
                                 host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                    get_properties_of_managed_object(host_objects[0],
                                                     properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts don't expose 'resourcePool' directly; traverse
            # through the parent compute resource instead
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Bug fix: this message used to reference placement['host'],
            # which is absent in the 'resourcepool' placement schema and
            # raised a KeyError instead of the intended error
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a long integer.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    # Multipliers that turn the given unit into kibibytes
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    normalized_unit = unit.lower()
    if normalized_unit not in multipliers:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    # vCenter needs long value
    return {'size': int(size * multipliers[normalized_unit]), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by its name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    if action == 'on':
        try:
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    elif action == 'off':
        try:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        # Block until the power task finishes
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # A missing file during power-on (e.g. vmx/vmdk) is surfaced as a
        # dedicated power-on error
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from a config spec and returns the resulting
    managed object reference.

    vm_name
        Name of the virtual machine to be created

    vm_config_spec
        Virtual Machine Config Spec object describing the new machine

    folder_object
        vm Folder managed object reference in which the machine is created

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Only forward the host when a real vim.HostSystem was handed in.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until creation completes and hand back the new VM reference.
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine in the inventory from an existing vmx file;
    on success returns the vim.VirtualMachine managed object reference.

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host
        Placement host of the virtual machine, vim.HostSystem object
    '''
    # Assemble the keyword set once; the placement host is optional.
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # The vmx path did not resolve to an existing configuration file.
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Reconfigures a virtual machine with the given config spec object and
    returns the task result.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Wait for the reconfiguration to finish and return its result.
    return wait_for_task(reconfig_task, vm_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine (removes it from both disk and inventory).

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the destroy task has completed.
    wait_for_task(destroy_task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory. Unlike
    ``delete_vm``, the machine's files are left on the datastore.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, consistent with the sibling helpers.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_license_assignment_manager
|
python
|
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises ``VMwareApiError`` / ``VMwareRuntimeError`` on API faults and
    ``VMwareObjectRetrievalError`` when no assignment manager is exposed.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # An ESXi host (as opposed to vCenter) may not expose the manager.
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
|
Returns the license assignment manager.
service_instance
The Service Instance Object from which to obtain the license manager.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1475-L1501
| null |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    # Guard-clause form: bail out early when the dependency is absent.
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    # NOTE(review): user-supplied values (especially the password) are
    # wrapped in single quotes but not shell-escaped; a value containing a
    # single quote breaks, or could alter, the command line. Confirm inputs
    # are trusted before calling.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Validates the credentials required by the chosen ``mechanism``
    (``userpass`` or ``sspi``), then attempts SmartConnect, falling back to
    progressively less strict SSL contexts when certificate verification
    fails. Raises CommandExecutionError on bad arguments and
    VMwareConnectionError when the connection cannot be established.
    '''
    log.trace('Retrieving new service instance')
    token = None
    # Validate mechanism-specific mandatory parameters before connecting.
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # NOTE(review): ``exc.message`` exists only on Python 2; on Python 3
        # this handler itself would raise AttributeError. Also, when the
        # message does not match, the TypeError is swallowed and
        # ``service_instance`` stays unbound — confirm intended behavior.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)

        try:
            # First fallback: retry with an unverified SSL context when the
            # failure is a certificate verification error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            # Second fallback: explicit TLSv1 context with verification off.
            if 'certificate verify failed' in six.text_type(exc):
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is torn down when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Look the spec up by name through the customization spec manager.
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the matching managed object reference, or None when no object of
    that type has the requested name.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Destroy the view so repeated lookups do not leak server-side view
        # objects in the session (mirrors the cleanup in get_content()).
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # pyVim caches the last connection process-wide; reuse it only when it
    # still points at the same host:port and we are not in a proxy minion.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and build a fresh connection.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    # Reuse the authenticated connection's SOAP stub and session cookie so
    # the new stub rides on the same vCenter session.
    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    # The default '<unnamed>' is truthy, so this branch is only taken when an
    # empty name is passed in explicitly.
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    si = vim.ServiceInstance('ServiceInstance')
    # Reuse the managed object's SOAP stub so the instance shares its session.
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises ``VMwareApiError`` / ``VMwareRuntimeError`` when the disconnect
    call fails with an API fault.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Tells whether the connection was made to a vCenter Server (returns True)
    or to an ESXi host (returns False).

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' identifies vCenter; 'HostAgent' a standalone ESXi host.
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Returns the ``AboutInfo`` of the endpoint; raises ``VMwareApiError`` /
    ``VMwareRuntimeError`` on API faults.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object
    '''
    # Bail out early when no switch of that name exists at all.
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    for candidate in container.view:
        if candidate.name == dvs_name:
            return candidate
    return None
def _get_pnics(host_reference):
    '''
    Helper function that returns the list of physical NICs configured on the
    given host (``host.config.network.pnic``).
    '''
    return host_reference.config.network.pnic
def _get_vnics(host_reference):
    '''
    Helper function that returns the list of virtual NICs configured on the
    given host (``host.config.network.vnic``).
    '''
    return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
    '''
    Helper function that returns the host's virtual NIC manager
    (``host.configManager.virtualNicManager``) — a single manager object,
    not a list.
    '''
    return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object
    '''
    # NOTE(review): this is byte-for-byte identical to _get_dvs_portgroup —
    # it matches by name only and does not restrict the result to uplink
    # portgroups; confirm callers rely solely on the name lookup.
    for portgroup in dvs.portgroup:
        if portgroup.name == portgroup_name:
            return portgroup
    return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Returns the base64-encoded first outgoing token; raises ``ImportError``
    when the gssapi library is unavailable and ``CommandExecutionError``
    when no token could be produced.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            # Return the first outgoing token; the caller hands it to the
            # server as the b64token for SmartConnect.
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): in_token is never reassigned inside this loop, so if
        # the first step() yields no token this raises immediately — confirm
        # whether a server response was meant to be fed back in here.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only standalone ESXi ('HostAgent') connections expose a single host
    # whose hardware maps onto minion grains.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # System / BIOS / CPU information from the first (only) host.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            # Network interfaces: virtual NICs carry the IPs, physical NICs
            # only contribute MAC addresses.
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the local reference to the container view.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (service content) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    # Thin convenience wrapper kept for readability at call sites.
    return service_instance.RetrieveContent()
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises ``VMwareApiError`` / ``VMwareRuntimeError`` on API faults.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view we created above, so views don't pile up in the
    # session; skipped when the caller supplied its own traversal spec.
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference whose property matches the
    given value, or None when no object matches.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value to match; compared against both the property and the
        object's moid string.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Retrieve every object of the requested type along with the property
    # we want to match against.
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # The stringified reference looks like 'vim.Type:moid'; strip quotes
        # so a raw moid can also be matched.
        moid = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or moid == property_value:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # The connection dropped mid-response; retry the request once.
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        # A broken pipe (EPIPE) is also retried once; any other IOError is
        # re-raised to the caller.
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)
    object_list = []
    for obj in content:
        # Flatten each ObjectContent's propSet into a plain dict, keeping the
        # managed object reference itself under the 'object' key.
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises a VMwareApiError when the properties could not be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # The object's name is retrieved first, purely for log messages; if the
    # 'name' property doesn't exist on this object type, fall back to a
    # placeholder rather than failing.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    # local_properties=True: the properties live on the object itself, so no
    # traversal spec is needed.
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the ``name`` property of a managed object, or None when the
    object has no name.

    mo_ref
        The managed object reference.
    '''
    # Delegate to the generic property fetcher; .get() turns a missing
    # 'name' into None instead of a KeyError.
    object_props = get_properties_of_managed_object(mo_ref, ['name'])
    return object_props.get('name')
def get_network_adapter_type(adapter_type):
    '''
    Returns a new network adapter device object for the given adapter type
    name.

    adapter_type
        The adapter type name ('vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or
        'e1000e') for which to instantiate the device object.
    '''
    # Map each known adapter type name to its pyVmomi device class.
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type not in adapter_classes:
        raise ValueError('An unknown network adapter object type name.')
    return adapter_classes[adapter_type]()
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name for a network adapter device
    object.

    adapter_object
        The adapter device object from which to obtain the network adapter
        type name.
    '''
    # Ordered table of (device class, type name). Order matters: the
    # subclasses (vmxnet2/vmxnet3, e1000e) must be checked before their
    # parent classes (vmxnet, e1000), otherwise isinstance() would match
    # the parent first.
    type_table = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for device_class, type_name in type_table:
        if isinstance(adapter_object, device_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    properties = ['name']
    # DVSs live under the datacenter's network folder: traverse
    # datacenter -> networkFolder -> childEntity. The datacenter hop is
    # skipped so only folder children are collected.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Filter by name unless all DVSs were requested.
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualSwitch,
                                      container_ref=dc_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_dvss or (dvs_names and i['name'] in dvs_names)]
    return items
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises a VMwareObjectRetrievalError when the folder can't be found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Single-hop traversal from the datacenter to its networkFolder.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    # A datacenter has exactly one network folder.
    return entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switches (DVS) in a datacenter.
    Returns the reference to the newly created distributed virtual switch.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a default spec carrying only the
        name is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Build a minimal create spec if the caller didn't supply one, and make
    # sure the config spec carries the requested name.
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    # Translate pyVmomi faults into salt exceptions so callers don't need
    # to depend on pyVmomi.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the creation task completes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    # Translate pyVmomi faults into salt exceptions so callers don't need
    # to depend on pyVmomi.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfigure task completes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether network I/O control (NIOC) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    # This API call applies immediately; it doesn't return a task.
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    # The traversal path depends on the parent type: from a datacenter go
    # through the network folder's children, from a DVS go directly
    # through its 'portgroup' property.
    if isinstance(parent_ref, vim.Datacenter):
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Filter by name unless all portgroups were requested.
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference

    Raises a VMwareObjectRetrievalError when no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by the system-assigned
    # 'SYSTEM/DVS.UPLINKPG' tag on the portgroup.
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs).

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    # Translate pyVmomi faults into salt exceptions so callers don't need
    # to depend on pyVmomi.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the creation task completes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log message typo: 'portgrouo' -> 'portgroup'.
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    # Translate pyVmomi faults into salt exceptions so callers don't need
    # to depend on pyVmomi.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfigure task completes.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    # Translate pyVmomi faults into salt exceptions so callers don't need
    # to depend on pyVmomi.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the destroy task completes.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks live under the datacenter's network folder: traverse
    # datacenter -> networkFolder -> childEntity.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    # Filter by name unless all networks were requested.
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    # Default to retrieving only the object names.
    props = ['name'] if properties is None else properties
    entries = get_mors_with_properties(service_instance, vim_object, props)
    return [entry['name'] for entry in entries]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    # Even a property read can raise a fault over the wire; translate
    # pyVmomi faults into salt exceptions.
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    # Even a property read can raise a fault over the wire; translate
    # pyVmomi faults into salt exceptions.
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label the vSphere client displays.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # The vCenter is identified by its instance UUID rather than a moid.
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
                license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid lookup) is expected to have exactly one assignment.
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    # Guard against querying a different vCenter than the one requested.
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter: identified by its instance UUID rather than a moid.
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before re-raising, consistent with every other fault
            # handler in this module (was previously missing here).
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    # Thin wrapper around the generic object lister.
    return list_objects(service_instance, vim.Datacenter, properties=['name'])
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns datacenter managed objects in a vCenter, optionally filtered by
    name.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    datacenters = []
    entries = get_mors_with_properties(service_instance,
                                       vim.Datacenter,
                                       property_list=['name'])
    for entry in entries:
        # Keep everything when all datacenters were requested; otherwise
        # keep only those whose name is in the requested list.
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name

    Raises a VMwareObjectRetrievalError when the datacenter doesn't exist.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if matches:
        return matches[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder and returns its
    managed object reference.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    # Translate pyVmomi faults into salt exceptions so callers don't need
    # to depend on pyVmomi.
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The name of the cluster to be retrieved

    Raises a VMwareObjectRetrievalError when the cluster doesn't exist.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's host folder: traverse
    # datacenter -> hostFolder -> childEntity.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    # CreateClusterEx is synchronous (no task object is returned).
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    # modify=True merges the spec into the existing config instead of
    # replacing it wholesale.
    try:
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfigure task completes.
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    # Thin wrapper around the generic object lister.
    return list_objects(service_instance, vim.ClusterComputeResource,
                        properties=['name'])
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster names associated with a given
    service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    # Datastore clusters are represented by vim.StoragePod objects.
    return list_objects(service_instance, vim.StoragePod, properties=['name'])
def list_datastores(service_instance):
    '''
    Returns a list of datastore names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # Thin wrapper around the generic object lister.
    return list_objects(service_instance, vim.Datastore, properties=['name'])
def list_datastores_full(service_instance):
    '''
    Returns a dict of datastores associated with a given service instance,
    keyed by datastore name. Each value contains basic information about
    the datastore: name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # Fetch the name list once, then expand each entry to its full info.
    return dict(
        (name, list_datastore_full(service_instance, name))
        for name in list_objects(service_instance, vim.Datastore))
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts.

    Sizes (capacity/free/used) are reported in MiB; usage is a percentage.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises a VMwareObjectRetrievalError when the datastore doesn't exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # The API reports sizes in bytes; convert to MiB.
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against division by zero: unmounted or inaccessible datastores
    # can report a capacity of 0.
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies to something like "'vim.HostSystem:host-12'";
        # keep only the moid part after the colon.
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.
    Returns None when no object matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # doesn't leak on the vCenter/ESXi side (matches the view cleanup
        # done elsewhere in this module).
        container.Destroy()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id.
    Returns None when no object matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # doesn't leak on the vCenter/ESXi side (matches the view cleanup
        # done elsewhere in this module).
        container.Destroy()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        # Translate pyVmomi faults into salt exceptions so that callers only
        # need to handle salt.exceptions types
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # A datastore that doesn't contain the directory is simply skipped;
        # its absence is not fatal for the overall search
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all datastores visible from the
        reference. Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # Build a new list instead of extending the argument in place,
            # so the caller's list is not mutated as a side effect
            datastore_names = datastore_names + disk_datastores
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)

    # Pick a traversal spec matching the reference type; the default
    # ``Traverse All`` spec doesn't reach the datastores of every type
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    # Translate pyVmomi faults into salt exceptions so callers only need to
    # handle salt.exceptions types
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem)

    service_instance
        The Service Instance Object used to query the vCenter/host.

    host_ref
        The vim.HostSystem object whose storage system is retrieved.

    hostname
        Name of the host; retrieved from ``host_ref`` when not provided.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition informations for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The vim.HostStorageSystem queried for the partition layout.

    device_path
        Path of the disk device to inspect.
    '''
    try:
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Only a single device path was requested, so return its single entry
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem used to compute the partition layout.

    device_path
        Path of the disk device the partition is added to.

    partition_info
        vim.HostDiskPartitionInfo describing the current disk layout.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed: this call previously used a str.format-style '{0}' placeholder
    # with %-style lazy logging, so the value was never interpolated
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved via ``host_ref`` when
        not provided.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a spec for a new vmfs partition covering the disk's free space
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem)

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore
    (the previous docstring was a copy-paste of create_vmfs_datastore)

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # The removal is issued through the datastore system of the first host
    # the datastore is attached to
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    properties = ['name']
    host_names = host_names or []
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # 'parent' is needed below to verify cluster membership; cluster
            # filtering only makes sense when a datacenter was specified
            properties.append('parent')
    else:
        # No datacenter given: start the search from the inventory root folder
        start_point = get_root_folder(service_instance)

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for host in hosts:
        # A host qualifies if it belongs to the requested cluster (when one
        # was given) and either all hosts are wanted or its name was requested
        if cluster_name:
            parent = host['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host['name'] in host_names:
            filtered_hosts.append(host['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # scsi address -> lun key, then lun key -> lun object; compose the two
    scsi_addr_to_lun_key = _get_scsi_address_to_lun_key_map(
        si, host_ref, storage_system, hostname)
    lun_by_key = {lun.key: lun
                  for lun in get_all_luns(host_ref, storage_system, hostname)}
    return {scsi_addr: lun_by_key[lun_key]
            for scsi_addr, lun_key in six.iteritems(scsi_addr_to_lun_key)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # Nothing to filter on and not all disks requested
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Only scsi disks matching the requested canonical name are of interest
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # No filter and not all groups requested: nothing to return
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (single) cache disk's canonical name
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    # The cache (ssd) disk of the group must match the expected id exactly
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # The capacity (non-ssd) disks must match as a set; compare sorted lists
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the host cache configuration info if the host cache is configured
    on the specified host, otherwise returns None.
    NOTE(review): the original docstring claimed a vim.HostScsiDisk return,
    but the code returns an entry of ``cacheConfigurationInfo`` — confirm.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Retrieve the cache manager and its config in one traversal
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in mebibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # The configuration is asynchronous; block until the task completes
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Return every host visible through the given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects.

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        List of resource pool names to select. May be None/empty when
        ``get_all_resource_pools`` is True.

    datacenter_name
        Name of the datacenter to search under. If None, the search starts at
        the root folder.

    get_all_resource_pools
        When True, return every resource pool found regardless of name.

    return
        List of vim.ResourcePool managed object references.

    Raises ``VMwareObjectRetrievalError`` if no pool matched.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Fixed: the message previously interpolated the (always empty)
        # result list instead of the names that were searched for.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(resource_pool_names,
                                                            get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Return every resource pool visible through the given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    return list_objects(service_instance, vim.ResourcePool)


def list_networks(service_instance):
    '''
    Return every network visible through the given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    return list_objects(service_instance, vim.Network)


def list_vms(service_instance):
    '''
    Return every virtual machine visible through the given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    return list_objects(service_instance, vim.VirtualMachine)


def list_folders(service_instance):
    '''
    Return every folder visible through the given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    return list_objects(service_instance, vim.Folder)


def list_dvs(service_instance):
    '''
    Return every distributed virtual switch visible through the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        switches.
    '''
    return list_objects(service_instance, vim.DistributedVirtualSwitch)


def list_vapps(service_instance):
    '''
    Return every vApp visible through the given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    return list_objects(service_instance, vim.VirtualApp)


def list_portgroups(service_instance):
    '''
    Return every distributed virtual portgroup visible through the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        portgroups.
    '''
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.
        NOTE(review): the task itself is polled roughly once per second
        regardless; this value only throttles how often the "Waiting for..."
        message is logged.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.

    Returns the task result on success; re-raises the task's error
    (translated to salt VMware exceptions) on failure.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only log every sleep_seconds iterations to avoid log spam.
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next one-second boundary relative to start_time so
        # that time_counter approximates elapsed wall-clock seconds.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault so the
        # handlers below can translate it into a salt exception type.
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first detailed fault message, when present.
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name; used to derive ``parent_ref`` when it is not given.

    vm_properties
        List of vm properties. When None, a default set covering hardware,
        storage, guest and runtime info is used.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises ``VMwareObjectRetrievalError`` when no VM matches and
    ``VMwareMultipleObjectsError`` when the name is ambiguous.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Fixed: the two adjacent literals previously concatenated without a
        # separating space, producing "...with thesame name...".
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the '
            'same name, please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary; when it contains a ``folder`` key that folder is
        looked up by name.

    base_vm_name
        Existing virtual machine name (for cloning); the base VM's parent
        folder is returned.

    Raises ``VMwareObjectRetrievalError`` when no folder can be determined and
    ``VMwareMultipleObjectsError`` when the folder name is ambiguous.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Clone case: reuse the parent folder of the base VM.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vmFolder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Fixed: previously this case fell through to an UnboundLocalError on
        # the return statement below.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The folder object could not be determined: no base VM, '
            'placement folder or datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied,
    we would like to use the strictest as possible.

    service_instance
        Service instance object used to query the vCenter.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info; exactly one of the keys
        ``host``, ``resourcepool`` or ``cluster`` is honored (in that order).

    return
        Tuple of (resource pool object, placement object), where the
        placement object is the host or cluster that owns the pool.
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose 'resourcePool' directly; clustered hosts
            # do not, so walk up to the cluster and take its pool instead.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: the message previously referenced placement['host'],
            # which is absent in this branch (KeyError) and also misnamed
            # the ambiguous object.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KiB based on the unit; returns a dict of the
    form ``{'size': <int>, 'unit': 'KB'}`` (vCenter expects an integer value).

    unit
        Unit of the size, one of ``gb``, ``mb`` or ``kb`` (case-insensitive).
        Note: to VMware a GB is the same as GiB = 1024 MiB.

    size
        Number which represents the size.

    Raises ``ArgumentValueError`` for any other unit.
    '''
    if unit.lower() == 'gb':
        # vCenter needs long value
        target_size = int(size * 1024 * 1024)
    elif unit.lower() == 'mb':
        target_size = int(size * 1024)
    elif unit.lower() == 'kb':
        target_size = int(size)
    else:
        # Fixed: the unit IS specified here, it just isn't supported.
        raise salt.exceptions.ArgumentValueError(
            'The unit is not supported: {0}'.format(unit))
    return {'size': target_size, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers a virtual machine on or off and waits for the operation to finish.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine ('on' or 'off')
    '''
    # Resolve the requested operation first; bail out early on bad input.
    if action == 'on':
        power_op = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_op = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_op()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from a config spec.

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Only pass a host when a real vim.HostSystem was supplied.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file; on
    success it returns the vim.VirtualMachine managed object reference.

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)
    '''
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Reconfigures a virtual machine with the given config spec object.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(reconfig_task, vm_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory.

    Unlike ``delete_vm``, this removes the VM from the inventory without
    deleting its files on disk.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed: the docstring and log message previously said 'Destroying',
    # but UnregisterVM only removes the VM from the inventory.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, for consistency with the sibling helpers.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_licenses
|
python
|
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
Returns the licenses on a specific instance.
service_instance
The Service Instance Object from which to obtain the licenses.
license_manager
The License Manager object of the service instance. If not provided it
will be retrieved.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1504-L1531
|
[
"def get_license_manager(service_instance):\n '''\n Returns the license manager.\n\n service_instance\n The Service Instance Object from which to obrain the license manager.\n '''\n\n log.debug('Retrieving license manager')\n try:\n lic_manager = service_instance.content.licenseManager\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{0}'.format(exc.privilegeId))\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n return lic_manager\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary (the ``cmd.run_all`` result dict)
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    # NOTE(review): the password is interpolated into a shell command line,
    # so it is visible in the process list and a pwd containing a single
    # quote would break quoting — consider esxcli's config/credstore options.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting through vCenter; target the specific ESXi host with -h.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        Location of the vCenter server or ESX/ESXi host.

    username / password
        Credentials; both mandatory when mechanism is ``userpass``.

    protocol / port
        Connection protocol and TCP port.

    mechanism
        Either ``userpass`` or ``sspi``.

    principal / domain
        Kerberos service principal and user domain; both mandatory when
        mechanism is ``sspi``.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Fixed: 'exc.message' does not exist on Python 3 (it raised
        # AttributeError here); use the exception's string form instead.
        # Also re-raise unconditionally so a non-matching TypeError cannot
        # fall through and leave service_instance unbound.
        if 'unexpected keyword argument' in six.text_type(exc):
            log.error('Initial connect to the VMware endpoint failed with %s', exc)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # Retry once with certificate verification disabled.
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: explicit TLSv1 context without verification.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is closed when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Do not shadow the parameter with the looked-up spec object.
    spec_ref = si.content.customizationSpecManager.GetCustomizationSpec(
        name=customization_spec_name)
    return spec_ref
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the first matching managed object, or None.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Fixed: container views are server-side objects; destroy the view
        # so repeated calls do not leak views on the vCenter/ESXi side.
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Try to reuse the pyVim-cached session before creating a new one.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Session expired on the server side; drop it and reconnect once.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a SOAP stub that points to a different path on the same host,
    created from an existing connection. The authenticated session cookie of
    the existing connection is reused, so no re-login is required.

    service_instance
        The Service Instance whose connection is cloned.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    # stub.host is '<hostname>:<port>'; keep only the hostname part
    hostname = stub.host.split(':')[0]
    # Extract the quoted session id out of the cookie header value
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    # Reuse the authenticated session cookie on the new stub
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of the managed object, used only for logging. This field is
        optional.
    '''
    # NOTE(review): the default '<unnamed>' is truthy, so this branch only
    # fires when a caller explicitly passes a falsy name (e.g. '' or None).
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    si = vim.ServiceInstance('ServiceInstance')
    # Share the managed object's SOAP stub so the new ServiceInstance uses
    # the same authenticated connection.
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host,
    translating pyVmomi faults into salt exceptions.

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises:
        salt.exceptions.VMwareApiError for permission/API faults.
        salt.exceptions.VMwareRuntimeError for vmodl runtime faults.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server
    (apiType 'VirtualCenter') and False if the connection is made to an ESXi
    host (apiType 'HostAgent').

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises:
        salt.exceptions.VMwareApiError if the apiType cannot be read or has
        an unexpected value; salt.exceptions.VMwareRuntimeError for vmodl
        runtime faults.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns the 'about' information (product name, version, build, apiType,
    ...) of the vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises:
        salt.exceptions.VMwareApiError for permission/API faults.
        salt.exceptions.VMwareRuntimeError for vmodl runtime faults.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object, or None if no
    DVS with the given name exists.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None
    '''
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        # Destroy the container view when done; previously it was leaked
        # (every other view user in this module destroys its view).
        try:
            for item in container.view:
                if item.name == dvs_name:
                    return item
        finally:
            container.Destroy()
    return None
def _get_pnics(host_reference):
    '''
    Helper function that returns the list of PhysicalNics of the given host
    (host_reference.config.network.pnic).
    '''
    return host_reference.config.network.pnic
def _get_vnics(host_reference):
    '''
    Helper function that returns the list of VirtualNics of the given host
    (host_reference.config.network.vnic).
    '''
    return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
    '''
    Helper function that returns the host's virtual NIC manager
    (host_reference.configManager.virtualNicManager). Despite the previous
    docstring, this is a single manager object, not a list.
    '''
    return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Raises:
        ImportError if the gssapi library is not available.
        salt.exceptions.CommandExecutionError if no token could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    # Service principal name in '<service>/<host>@<REALM>' form
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # Perform one step of the GSSAPI handshake; the first token produced
        # is returned base64-encoded (py2 returns bytes directly).
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): in_token is never populated from a server response in
        # this loop, so this branch fires whenever step() yields no token --
        # confirm the intended multi-step handshake behavior.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Hardware grains are only collected on a direct ESXi connection
    # (apiType 'HostAgent'); for vCenter an empty dict is returned.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the single HostSystem visible on an ESXi host
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grain is reported in MiB
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Collect addresses of the host's VMkernel NICs
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is '<host>.<domain>'; the dot is omitted when domain is empty
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NICs contribute only their MAC addresses
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the local reference to the container view
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (retrieved ServiceContent) of a Service Instance
    Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    return service_instance.RetrieveContent()
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises:
        salt.exceptions.VMwareApiError for permission/API faults.
        salt.exceptions.VMwareRuntimeError for vmodl runtime faults.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more
        filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.

    Raises:
        salt.exceptions.VMwareApiError / VMwareRuntimeError on pyVmomi
        faults raised while creating the view, retrieving the contents, or
        destroying the view.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    # Tracks whether a container view was created here (and must be destroyed)
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference whose property matches the
    requested value (either by the named property or by the stringified
    object id), or None when nothing matches.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Walk the managed objects carrying the requested property and stop at
    # the first match.
    entries = get_mors_with_properties(service_instance,
                                       object_type,
                                       property_list=[property_name],
                                       container_ref=container_ref)
    for entry in entries:
        # Stringified MOR looks like 'vim.Xyz:id'; strip surrounding quotes
        entry_id = six.text_type(entry.get('object', '')).strip('\'"')
        if entry[property_name] == property_value or property_value == entry_id:
            return entry['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specigying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    # Retry once on transient connection errors (stale HTTP keep-alive
    # connections surface as BadStatusLine or EPIPE).
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)
    # Flatten each ObjectContent into a dict of property name -> value,
    # plus the managed object reference itself under the 'object' key.
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal manner (a single local-property collection on the object itself).

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises:
        salt.exceptions.VMwareApiError if no properties could be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First fetch just the name for logging/error messages; fall back to
    # '<unnamed>' when the object type has no 'name' property.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.
    If the name wasn't found, it returns None.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device object of the requested type.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or 'e1000e'.

    Raises ValueError for any other type name.
    '''
    # Map the type name to the vim device class name; the class itself is
    # looked up lazily, so unknown names fail before touching vim.
    device_class_names = {
        'vmxnet': 'VirtualVmxnet',
        'vmxnet2': 'VirtualVmxnet2',
        'vmxnet3': 'VirtualVmxnet3',
        'e1000': 'VirtualE1000',
        'e1000e': 'VirtualE1000e',
    }
    if adapter_type not in device_class_names:
        raise ValueError('An unknown network adapter object type name.')
    return getattr(vim.vm.device, device_class_names[adapter_type])()
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name for a virtual adapter device.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ValueError when the object is none of the known adapter types.
    '''
    # Keep the original check order: more specific vmxnet variants are
    # tested before the base VirtualVmxnet, and e1000e before e1000.
    type_checks = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for device_class, type_name in type_checks:
        if isinstance(adapter_object, device_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs
    folder_children_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_children_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        # Keep everything when get_all_dvss is set, otherwise filter by name
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises:
        salt.exceptions.VMwareObjectRetrievalError when the folder is not
        found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Follow the datacenter's 'networkFolder' property directly
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if entries:
        return entries[0]['object']
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Network folder in datacenter \'{0}\' wasn\'t retrieved'
        ''.format(dc_name))
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete. Returns None (not the DVS reference).

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a default spec named ``dvs_name`` is
        built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    # Ensure the spec carries a config with the requested switch name
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and waits for
    the reconfiguration task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.

    Raises:
        salt.exceptions.VMwareApiError for permission/API faults.
        salt.exceptions.VMwareRuntimeError for vmodl runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (network I/O control) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.

    Raises:
        salt.exceptions.VMwareApiError for permission/API faults.
        salt.exceptions.VMwareRuntimeError for vmodl runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises:
        salt.exceptions.ArgumentValueError when the parent is neither a
        datacenter nor a DVS.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    # For a datacenter, traverse networkFolder -> childEntity; for a DVS the
    # portgroups hang directly off its 'portgroup' property.
    if isinstance(parent_ref, vim.Datacenter):
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference

    Raises:
        salt.exceptions.VMwareObjectRetrievalError when no uplink portgroup
        was found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by the system tag
    # 'SYSTEM/DVS.UPLINKPG' on the portgroup's 'tag' property.
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)

    Raises:
        salt.exceptions.VMwareApiError for permission/API faults.
        salt.exceptions.VMwareRuntimeError for vmodl runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)

    Raises:
        salt.exceptions.VMwareApiError for permission/API faults.
        salt.exceptions.VMwareRuntimeError for vmodl runtime faults.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log message typo ('portgrouo' -> 'portgroup')
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference

    Raises:
        salt.exceptions.VMwareApiError for permission/API faults.
        salt.exceptions.VMwareRuntimeError for vmodl runtime faults.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.

    Raises:
        salt.exceptions.ArgumentValueError when the parent is not a
        datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to reach networks
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Collect the 'name' of every matching managed object
    return [item['name']
            for item in get_mors_with_properties(service_instance, vim_object,
                                                 properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obrain the license manager.

    Raises:
        salt.exceptions.VMwareApiError for permission/API faults.
        salt.exceptions.VMwareRuntimeError for vmodl runtime faults.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager of a service instance.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises ``salt.exceptions.VMwareObjectRetrievalError`` if the service
    instance does not expose a license assignment manager.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license and returns the resulting license object.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add; attached as the
        'VpxClientLicenseLabel' label of the new license.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging, and to validate the assignment when the
        entity is the vCenter. Required.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # entity_name is guaranteed non-empty here (checked above), so the
        # vCenter assignment's display name is always validated
        check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Exactly one assignment is expected when querying by instance uuid
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')

    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license object.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before translating the fault, consistently with the other
            # API-fault handlers in this module
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Lists the names of the datacenters visible from the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter, optionally filtered by name.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters; overrides the
        name filter. Default value is False.
    '''
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns the vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name.

    Raises a VMwareObjectRetrievalError if no such datacenter exists.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if matches:
        return matches[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder and returns the created
    vim.Datacenter object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The name of the cluster to be retrieved

    Raises a VMwareObjectRetrievalError if the cluster is not found in the
    datacenter.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's host folder:
    # Datacenter -> hostFolder -> childEntity
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx). Required.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Reconfigures a cluster and blocks until the reconfiguration task
    completes.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx) containing the changes to
        apply. Required.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True applies the spec as an incremental change
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Lists the names of the clusters visible from the given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Lists the names of the datastore clusters (storage pods) visible from the
    given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Lists the names of the datastores visible from the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns the basic information of every datastore visible from the given
    service instance, keyed by datastore name. Each value contains:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {
        ds_name: list_datastore_full(service_instance, ds_name)
        for ds_name in list_objects(service_instance, vim.Datastore)
    }
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts.
    Capacity/free/used are derived from the summary byte counts divided by
    1024 twice (i.e. MiB); usage is a percentage of capacity.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises a VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )

    items = {}
    # NOTE(review): single quotes are stripped from the string values —
    # presumably to sanitize them for downstream consumers; confirm before
    # changing
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []

    for host in datastore_object.host:
        # host.key stringifies as '<type>:<moid>'; keep the moid part after
        # the first colon — TODO confirm format against pyVmomi
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)

    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Returns the reference to the first object of the specified type whose
    name matches, or None when no match exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    return next((entry for entry in container.view if entry.name == obj_name),
                None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Returns the reference to the first object of the specified type whose
    managed object id matches, or None when no match exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    return next((entry for entry in container.view if entry._moId == obj_moid),
                None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects; datastores
        where the directory is not found are silently skipped
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Search path is in the '[<datastore>] <directory>' form
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Missing directories are not an error for this best-effort query
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical names.

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible. Supported
        types: vim.HostSystem, vim.ClusterComputeResource, vim.Datacenter,
        vim.StoragePod, or the root 'Datacenters' folder.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all datastores visible from the
        reference. Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:

            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Matches from the disk filter are merged into the name filter
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores

    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []

    log.trace('datastore_names = %s', datastore_names)

    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used to retrieve the storage system.

    host_ref
        The vim.HostSystem whose storage system is retrieved.

    hostname
        Name of the host, used in log/error messages. This argument is
        optional; retrieved from host_ref when not provided.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)

    # HostSystem -> configManager.storageSystem traversal
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns the partition information for a device path, of type
    vim.HostDiskPartitionInfo.

    storage_system
        The host's storage system (vim.HostStorageSystem).

    device_path
        Path of the disk device whose partition info is retrieved.
    '''
    try:
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    # A single device path was requested, so only one result is relevant
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The host's storage system (vim.HostStorageSystem).

    device_path
        Path of the disk device to be partitioned.

    partition_info
        The current partition info (vim.HostDiskPartitionInfo) of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use a %s placeholder: log.trace performs %-style lazy formatting, so the
    # previous '{0}' placeholder was emitted literally and the value dropped
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a scsi disk and returns the created
    vim.Datastore reference.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDisk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's storage system (vim.HostStorageSystem). Optional;
        retrieved from host_ref when not provided.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # The new partition consumes the remaining free space on the disk
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional and is only used in
        log/error messages.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # HostSystem -> configManager.datastoreSystem traversal
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore, going through the datastore system of one of its
    attached hosts.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises a VMwareApiError if the datastore has no attached hosts.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host can perform the removal; use the first one
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)

        if cluster_name:
            # Keep only hosts whose direct parent is the requested cluster
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue

        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue

        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all the scsi luns (``storageDeviceInfo.scsiLun``
    entries) on an ESXi host; returns an empty list when none are found.
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    storage_system
        The host's storage system. Retrieved if not provided.
        Default is None.
    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Builds a map keyed by scsi address whose values are the vim.ScsiLun
    objects present on an ESXi host.
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    storage_system
        The host's storage system. Default is None.
    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref,
                                                                name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # scsi address -> lun key
    scsi_addr_to_lun_key = _get_scsi_address_to_lun_key_map(
        service_instance, host_ref, storage_system, hostname)
    # lun key -> lun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # compose the two maps: scsi address -> lun object
    return {addr: lun_by_key[key]
            for addr, key in six.iteritems(scsi_addr_to_lun_key)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None
    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None
    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Nothing to filter on -> nothing to return
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk
    host_ref
        The reference of the ESXi host containing the disk
    disk_id
        The canonical name of the disk whose partitions are to be retrieved
    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Keep only the scsi disk whose canonical name matches disk_id
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk of an ESXi host by applying an empty
    partition spec.
    service_instance
        The Service Instance Object from which to obtain all information
    host_ref
        The reference of the ESXi host containing the disk
    disk_id
        The canonical name of the disk whose partitions are to be removed
    hostname
        The ESXi hostname. Default is None.
    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Keep only the scsi disk whose canonical name matches disk_id
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their canonical names.
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.
    get_all_disk_groups
        Specifies whether to retrieve all disk groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # No filter -> nothing to return
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by the canonical name of its (single) ssd
    # cache disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that a disk group contains exactly the expected cache disk and
    capacity disks; raises ArgumentValueError when a check fails, returns
    True otherwise.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the first cache configuration info entry if the host cache is
    configured on the specified host, otherwise returns None
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Traverse from the host to its cache configuration manager
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.
    swap_size_MiB
        The size in Mibibytes of the swap.
    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    Returns True on success.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Lists the hosts known to the given service instance.
    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    hosts = list_objects(service_instance, vim.HostSystem)
    return hosts
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects
    service_instance
        The service instance object to query the vCenter
    resource_pool_names
        List of resource pool names to retrieve
    datacenter_name
        Name of the datacenter where the resource pool is available.
        Default is None (search starts at the root folder).
    get_all_resource_pools
        Boolean; when True every resource pool in the container is returned
        and ``resource_pool_names`` is ignored. Default is False.
    return
        List of vim.ResourcePool managed object references
    Raises ``VMwareObjectRetrievalError`` when no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search under the datacenter when given, otherwise under the root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the *requested* names; previously the (always empty)
        # result list was formatted here, making the error useless
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Lists the resource pools known to the given service instance.
    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    pools = list_objects(service_instance, vim.ResourcePool)
    return pools
def list_networks(service_instance):
    '''
    Lists the networks known to the given service instance.
    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    networks = list_objects(service_instance, vim.Network)
    return networks
def list_vms(service_instance):
    '''
    Lists the virtual machines known to the given service instance.
    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vms = list_objects(service_instance, vim.VirtualMachine)
    return vms
def list_folders(service_instance):
    '''
    Lists the folders known to the given service instance.
    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    folders = list_objects(service_instance, vim.Folder)
    return folders
def list_dvs(service_instance):
    '''
    Lists the distributed virtual switches known to the given service
    instance.
    service_instance
        The Service Instance Object from which to obtain distributed virtual
        switches.
    '''
    switches = list_objects(service_instance, vim.DistributedVirtualSwitch)
    return switches
def list_vapps(service_instance):
    '''
    Lists the vApps known to the given service instance.
    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vapps = list_objects(service_instance, vim.VirtualApp)
    return vapps
def list_portgroups(service_instance):
    '''
    Lists the distributed virtual portgroups known to the given service
    instance.
    service_instance
        The Service Instance Object from which to obtain distributed virtual
        portgroups.
    '''
    portgroups = list_objects(service_instance,
                              vim.dvs.DistributedVirtualPortgroup)
    return portgroups
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.
    task
        The task to wait for.
    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.
    task_type
        The type of task being performed. Useful information for debugging purposes.
    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.
        NOTE(review): the loop below always sleeps ~1 second per iteration
        regardless of this value; ``sleep_seconds`` only throttles how often
        the "Waiting" message is logged.
    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    Returns the task result on success; on task failure re-raises the task's
    fault translated into a salt VMware exception.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep up to the next whole-second boundary relative to start_time
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault so it can be
        # translated into the matching salt exception below
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.
    service_instance
        Service instance object to access vCenter
    name
        Name of the virtual machine.
    datacenter
        Datacenter name
    vm_properties
        List of vm properties.
    traversal_spec
        Traversal Spec object(s) for searching.
    parent_ref
        Container Reference object for searching under a given object.
    Raises ``VMwareObjectRetrievalError`` when no VM matches ``name`` and
    ``VMwareMultipleObjectsError`` when more than one VM matches.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default set of properties retrieved when the caller does not
        # request specific ones
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # The two literals were previously concatenated without a comma,
        # producing the garbled message 'with thesame name'
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object
    service_instance
        Service instance object
    datacenter
        Name of the datacenter
    placement
        Placement dictionary
    base_vm_name
        Existing virtual machine name (for cloning)
    NOTE(review): if ``base_vm_name`` is falsy, ``placement`` has no 'folder'
    key and ``datacenter`` is falsy, ``folder_object`` is never assigned and
    the final return raises UnboundLocalError -- callers appear to always
    supply a datacenter; confirm before relying on this path.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Cloning: reuse the folder that contains the base VM
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.
    datacenter
        Name of the datacenter
    placement
        Dictionary with the placement info, cluster, host resource pool name
    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose no 'resourcePool' property; traverse
            # from the host's parent compute resource instead
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # The message previously referenced placement['host'], which may
            # not exist in this branch (KeyError) and named the wrong object
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a dict with the
    converted integer size and the 'KB' unit.
    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB
    size
        Number which represents the size
    '''
    normalized_unit = unit.lower()
    if normalized_unit == 'kb':
        converted = int(size)
    elif normalized_unit == 'mb':
        converted = int(size * 1024)
    elif normalized_unit == 'gb':
        # vCenter needs long value
        converted = int(size * 1024 * 1024)
    else:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': converted, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine and waits for the power task to finish.
    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine
    action
        Operation option to power on/off the machine; 'on' or 'off'.
        Any other value raises ArgumentValueError.
    Returns the ``virtual_machine`` object on success.
    '''
    if action == 'on':
        try:
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    elif action == 'off':
        try:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # A missing file during power-on is surfaced as a power error
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec
    vm_name
        Virtual machine name to be created
    vm_config_spec
        Virtual Machine Config Spec object
    folder_object
        vm Folder managed object reference
    resourcepool_object
        Resource pool object where the machine will be created
    host_object
        Host object where the machine will be placed (optional)
    return
        Virtual Machine managed object reference
    '''
    try:
        # Only pass the host argument when a valid HostSystem was supplied
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference
    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object
    name
        Name of the virtual machine
    vmx_path:
        Full path to the vmx file, datastore name should be included
    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object
    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)
    '''
    try:
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # Most likely the vmx path does not exist on the datastore
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Reconfigures a virtual machine with the given config spec and returns
    the reconfiguration task's result.
    vm_ref
        Virtual machine managed object reference
    vm_config_spec
        Virtual machine config spec object to update
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(reconfig_task, name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroy the referenced virtual machine and wait for the destroy task
    to complete.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory.

    Unlike ``delete_vm``, this removes the VM from vCenter/ESXi inventory
    without destroying its files on disk.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed: the previous docstring/log said "Destroying", but UnregisterVM
    # only removes the VM from inventory.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before raising for consistency with delete_vm's handlers.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
add_license
|
python
|
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.
    key
        The key of the license to add.
    description
        The description of the license to add.
    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Returns the object returned by the LicenseManager's ``AddLicense`` call.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The label is what the vSphere client displays for this license entry.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
|
Adds a license.
service_instance
The Service Instance Object.
key
The key of the license to add.
description
The description of the license to add.
license_manager
The License Manager object of the service instance. If not provided it
will be retrieved.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1534-L1570
|
[
"def get_license_manager(service_instance):\n '''\n Returns the license manager.\n\n service_instance\n The Service Instance Object from which to obrain the license manager.\n '''\n\n log.debug('Retrieving license manager')\n try:\n lic_manager = service_instance.content.licenseManager\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{0}'.format(exc.privilegeId))\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n return lic_manager\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Module loader gate: only load when pyVmomi is available.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli commmand, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Fall back to the standard HTTPS endpoint when not specified.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    if esxi_host:
        # 'host' is a vCenter; route the command to a specific ESXi host.
        target_args = ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                      '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                   esxi_host,
                                                                   user,
                                                                   pwd,
                                                                   protocol,
                                                                   port,
                                                                   cmd)
    else:
        # Connecting directly to an ESXi server.
        target_args = ' -s {0} -u {1} -p \'{2}\' ' \
                      '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                   user,
                                                                   pwd,
                                                                   protocol,
                                                                   port,
                                                                   cmd)
    esx_cmd += target_args
    return salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        Location of the vCenter/ESXi endpoint.
    username, password
        Credentials; both mandatory when ``mechanism`` is ``userpass``.
    protocol, port
        Passed through unchanged to ``SmartConnect``.
    mechanism
        Either ``userpass`` or ``sspi``.
    principal, domain
        Kerberos service principal and user domain; both mandatory when
        ``mechanism`` is ``sspi``.

    Raises ``CommandExecutionError`` for missing/unsupported parameters and
    ``VMwareConnectionError`` when a connection cannot be established.
    '''
    log.trace('Retrieving new service instance')
    # For SSPI a base64 GSSAPI token is sent instead of a password.
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # NOTE(review): ``exc.message`` only exists on Python 2; on Python 3
        # this handler would itself raise AttributeError -- confirm/port.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First fallback: retry with certificate verification disabled
            # when the failure was an SSL certificate verification error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Second fallback: an explicit TLSv1 context with
                # verification turned off entirely.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is closed when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)
    customization_spec_name
        Name of the customization spec
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name, or None
    when no object matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)
    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)
    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    matches = (entry for entry in container.view if entry.name == obj_name)
    return next(matches, None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.
    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``
    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``
    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.
    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.
    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.
    principal
        Kerberos service principal. Required if mechanism is ``sspi``
    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # Try to reuse pyVim's process-wide cached service instance first.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        # Cheap round trip that fails with NotAuthenticated on a stale session.
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.
    path
        Path of the new stub.
    ns
        Namespace of the new stub.
        Default value is None
    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    # Reuse the authenticated session cookie of the existing connection so
    # the new stub does not have to log in again.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Build a ServiceInstance that shares the SOAP stub of a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).
    name
        Name of managed object. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    service_instance = vim.ServiceInstance('ServiceInstance')
    service_instance._stub = mo_ref._stub
    return service_instance
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises ``VMwareApiError`` or ``VMwareRuntimeError`` when the disconnect
    fails on the server side.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Return True when the connection target is a vCenter Server and False
    when it is an ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # Guard-clause form: the two known values, then the error path.
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Returns the ``content.about`` object of the service instance.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object, or None when
    no DVS with that name exists.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object
    '''
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    return next(
        (entry for entry in container.view if entry.name == dvs_name), None)
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    Returns the base64-encoded token, or raises ``CommandExecutionError``
    when no token could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    # Kerberos service name of the form principal/host@DOMAIN.
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # NOTE(review): the function returns on the first token produced by
        # ctx.step(); in_token is never updated, so a multi-round-trip
        # negotiation is not actually performed here -- confirm intended.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    Returns a dict of grain names to values; empty when the endpoint is not
    a HostAgent (i.e. not a direct ESXi connection).

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        # A HostAgent endpoint manages exactly one host; grains are read
        # from the first (only) HostSystem in the container view.
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is reported in bytes; grains use MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            # Per-interface IP/MAC grains keyed by vmkernel device name.
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # FQDN is host[.domain]; the separator is only added when a
            # domain name is configured.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (retrieved content) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises ``VMwareApiError`` or ``VMwareRuntimeError`` when the folder
    cannot be retrieved.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.
    obj_type
        The type of content to obtain.
    property_list
        An optional list of object properties to used to return even more filtered content results.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.
    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view created above; views we did not create are
    # left alone.
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.
    object_type
        The type of content for which to obtain managed object references.
    property_value
        The value of the property to match against.
    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Fetch every managed object reference carrying the requested property.
    mors = get_mors_with_properties(service_instance, object_type,
                                    property_list=[property_name],
                                    container_ref=container_ref)
    for mor in mors:
        # Also accept a match against the stringified MOR id.
        mor_id = six.text_type(mor.get('object', '')).strip('\'"')
        if mor[property_name] == property_value or mor_id == property_value:
            return mor['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.
    object_type
        The type of content for which to obtain managed object references.
    property_list
        An optional list of object properties used to return even more filtered managed object reference results.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec
    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # Connection dropped mid-request; retry once.
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        # Retry only on a broken pipe; any other I/O error is re-raised.
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)
    # Flatten each result's propSet into a plain dict; the managed object
    # reference itself is stored under the 'object' key.
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises ``VMwareApiError`` if the properties couldn't be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    try:
        # Retrieve the object's name first so error messages below can refer
        # to it; fall back to a placeholder if 'name' is not a valid property
        # of this object type.
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object, or None if the object has no
    ``name`` property.

    mo_ref
        The managed object reference.
    '''
    # Delegate the property retrieval; absent names come back as None
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return the network adapter type.

    adapter_type
        The adapter type from which to obtain the network adapter type.
    '''
    # Map the string identifier onto the corresponding pyVmomi device class
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    adapter_class = adapter_classes.get(adapter_type)
    if adapter_class is None:
        raise ValueError('An unknown network adapter object type name.')
    return adapter_class()
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type.

    adapter_object
        The adapter object from which to obtain the network adapter type.
    '''
    # Order is significant: subclass checks (vmxnet2/vmxnet3, e1000e) must
    # come before their more general counterparts.
    type_names = [
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    ]
    for adapter_class, type_name in type_names:
        if isinstance(adapter_object, adapter_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises ``VMwareObjectRetrievalError`` if the folder wasn't found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Single hop: datacenter -> networkFolder
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete.

    NOTE(review): the original documentation claimed the new DVS reference is
    returned, but the function actually returns None -- the result of the
    creation task is discarded after ``wait_for_task``.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a default spec named ``dvs_name``
        is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and waits for
    the reconfiguration task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (Network I/O Control) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).

    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Traverse datacenter -> networkFolder -> childEntity
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # Parent is a distributed virtual switch: single hop to portgroups
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference

    Raises ``VMwareObjectRetrievalError`` if no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        # The uplink portgroup carries the system tag 'SYSTEM/DVS.UPLINKPG'
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in log message: was 'Updating portgrouo %s'
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of objects from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Only the names are exposed to the caller
    return [entry['name'] for entry in
            get_mors_with_properties(service_instance, vim_object, properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises ``VMwareObjectRetrievalError`` if no assignment manager is exposed.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ``ArgumentValueError`` if entity_name is missing and
    ``VMwareObjectRetrievalError`` if the assignment lookup is ambiguous or
    belongs to a different vCenter.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            # The vCenter itself is identified by its instance UUID
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        # Other entities (host, cluster, ...) are identified by their moId
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # A vCenter UUID lookup should match exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        # Defensive check: make sure the assignment belongs to this vCenter
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license
    (vim.LicenseManagerLicenseInfo).

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Fix: log the fault before translating it, consistent with
            # every other fault handler in this module (was missing here).
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    # Thin wrapper over list_objects (returns names only)
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name

    Raises ``VMwareObjectRetrievalError`` if the datacenter doesn't exist.
    '''
    datacenters = get_datacenters(service_instance,
                                  datacenter_names=[datacenter_name])
    if not datacenters:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return datacenters[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter and returns its reference (vim.Datacenter).

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved

    Raises ``VMwareObjectRetrievalError`` if the cluster wasn't found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref,
                                                                name=dc_name)
    # Traverse datacenter -> hostFolder -> childEntity to reach the clusters
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    matches = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.ClusterComputeResource,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if entry['name'] == cluster:
            matches.append(entry['object'])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter and waits for the reconfiguration
    task to complete.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    # Thin wrapper over list_objects (returns names only)
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster (StoragePod) names associated with a
    given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    # Thin wrapper over list_objects (returns names only)
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastore names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # Thin wrapper over list_objects (returns names only)
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dict of datastores associated with a given service instance,
    keyed by datastore name. Each value contains basic information about the
    datastore: name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {name: list_datastore_full(service_instance, name)
            for name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises ``VMwareObjectRetrievalError`` if the datastore doesn't exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    summary = datastore_object.summary
    items = {}
    items['name'] = str(summary.name).replace("'", "")
    items['type'] = str(summary.type).replace("'", "")
    items['url'] = str(summary.url).replace("'", "")
    # Sizes converted from bytes to MiB
    items['capacity'] = summary.capacity / 1024 / 1024
    items['free'] = summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Fix: guard against ZeroDivisionError for datastores reporting a zero
    # capacity (e.g. inaccessible or unmounted datastores).
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key renders as "'vim.HostSystem:host-123'"; extract the moId
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name, or None if
    no object with that name exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Fix: destroy the server-side container view so repeated lookups
        # don't leak view objects in the vCenter/ESXi session.
        container.Destroy()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id, or None if
    no object with that moId exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Fix: destroy the server-side container view so repeated lookups
        # don't leak view objects in the vCenter/ESXi session.
        container.Destroy()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Search is scoped to '[<datastore>] <directory>'
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Best-effort: a datastore without the directory is simply skipped
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all datastores visible from the
        reference. Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            # Backing disk filtering requires host-level storage info
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:

            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Merge the disk-derived names into the explicit name filter
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)

    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises VMwareApiError or VMwareRuntimeError when the vCenter call fails.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Retrieves the HostStorageSystem managed object of an ESXi host.

    service_instance
        The Service Instance Object used to query the host.

    host_ref
        The vim.HostSystem whose storage system is requested.

    hostname
        Name of the host; looked up from ``host_ref`` when not given.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem property
    spec = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.HostSystem,
        path='configManager.storageSystem',
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       container_ref=host_ref,
                                       property_list=['systemFile'],
                                       traversal_spec=spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return results[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition informations for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The host's HostStorageSystem managed object.

    device_path
        The device path whose partition layout is retrieved.
    '''
    try:
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Only a single device path was requested, so the first (and only)
    # entry is the answer
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The host's HostStorageSystem managed object.

    device_path
        Path of the device that will hold the new partition.

    partition_info
        Current vim.HostDiskPartitionInfo of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fix: use %-style lazy formatting; '{0}' is never interpolated by logging
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's HostStorageSystem. Retrieved from the host when not
        provided. Default is None.

    Returns the reference to the newly created datastore.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a spec for a new partition taking up the free space on the disk
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Retrieves the HostDatastoreSystem managed object of an ESXi host.

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host; looked up from ``host_ref`` when not given.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem property
    spec = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.HostSystem,
        path='configManager.datastoreSystem',
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       container_ref=host_ref,
                                       property_list=['datastore'],
                                       traversal_spec=spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return results[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The datastore is removed via the datastore system
    of the first host it is attached to.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        # Removal must go through an attached host's datastore system
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    props = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # The parent property is needed to test cluster membership;
            # this also verifies the cluster exists in the datacenter
            props.append('parent')
    else:
        # Without a datacenter, search from the inventory root folder
        start_point = get_root_folder(service_instance)

    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=props)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])

    def _belongs_to_cluster(host):
        # True when the host's parent is the requested cluster
        parent = host['parent']
        return (isinstance(parent, vim.ClusterComputeResource) and
                get_managed_object_name(parent) == cluster_name)

    selected = []
    for host in hosts:
        if cluster_name and not _belongs_to_cluster(host):
            continue
        if get_all_hosts or host['name'] in host_names:
            selected.append(host['object'])
    return selected
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises VMwareObjectRetrievalError when the storage device, multipath
    or lun information can't be retrieved from the host.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    Returns an empty list when the host reports no scsi luns.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Builds a mapping from scsi address to the corresponding vim.ScsiLun
    object on an ESXi host.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = storage_system or get_storage_system(si, host_ref,
                                                          hostname)
    # scsi address -> lun key
    addr_to_key = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                   storage_system, hostname)
    # lun key -> lun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Join the two maps: scsi address -> lun object
    scsi_addr_map = {}
    for scsi_addr, lun_key in six.iteritems(addr_to_key):
        scsi_addr_map[scsi_addr] = lun_by_key[lun_key]
    return scsi_addr_map
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # Nothing to filter by, so nothing can match
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError if no devices or no disk with the
    given canonical name are found on the host.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Narrow the lun list down to the disk with the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Narrow the lun list down to the disk with the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # No filter given, so nothing can match
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # Each disk group is identified by its single cache disk (ssd)
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that the cache and capacity disks of a disk group match the
    expected canonical names.

    Raises ArgumentValueError on any mismatch; returns True when the disk
    group checks out.
    '''
    # The cache disk must be exactly the expected one
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    # The capacity disks must match as a set (order doesn't matter)
    found = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected = sorted(capacity_disk_ids)
    if found != expected:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(found, expected))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, otherwise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Look the cache manager up via a property collector traversal
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        # A cache manager was provided; query its properties directly
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method

    Returns True when the configuration task completes.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the configuration task finishes (raises on task failure)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Lists every ESXi host known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    object_type = vim.HostSystem
    return list_objects(service_instance, object_type)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean

    return
        Resourcepool managed object reference

    Raises VMwareObjectRetrievalError when no matching resource pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Scope the search to the datacenter when one is given, otherwise to the
    # whole inventory
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Fix: report the requested names, not the (empty) result list
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))

    return selected_pools
def list_resourcepools(service_instance):
    '''
    Return every resource pool known to the given service instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    pool_type = vim.ResourcePool
    return list_objects(service_instance, pool_type)
def list_networks(service_instance):
    '''
    Return every network known to the given service instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    network_type = vim.Network
    return list_objects(service_instance, network_type)
def list_vms(service_instance):
    '''
    Return every virtual machine known to the given service instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    vm_type = vim.VirtualMachine
    return list_objects(service_instance, vm_type)
def list_folders(service_instance):
    '''
    Return every folder known to the given service instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    folder_type = vim.Folder
    return list_objects(service_instance, folder_type)
def list_dvs(service_instance):
    '''
    Return every distributed virtual switch known to the given service
    instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    dvs_type = vim.DistributedVirtualSwitch
    return list_objects(service_instance, dvs_type)
def list_vapps(service_instance):
    '''
    Return every vApp known to the given service instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    vapp_type = vim.VirtualApp
    return list_objects(service_instance, vapp_type)
def list_portgroups(service_instance):
    '''
    Return every distributed virtual portgroup known to the given service
    instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    portgroup_type = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, portgroup_type)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    Returns ``task.info.result`` on success. On failure the task's stored
    fault is re-raised and translated to the matching salt VMware exception.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.
        NOTE(review): this only throttles how often the progress message is
        logged; the loop always polls once per second. Must be non-zero
        (it is used as a modulus below) — confirm callers never pass 0.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll until the task leaves the 'running'/'queued' states.
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep up to the next whole-second boundary relative to start_time,
        # so iterations stay roughly aligned to one-second ticks.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state
        # Re-raise the stored fault so the handlers below can map it onto
        # the corresponding salt exception types.
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first detailed fault message when one is present.
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError when no VM matches and
    VMwareMultipleObjectsError when the name is ambiguous.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set used by the salt vmware modules.
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # The original joined a single implicitly-concatenated string,
        # yielding "with thesame name"; split into two list items so the
        # space separator is applied.
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises VMwareObjectRetrievalError when no folder can be determined and
    VMwareMultipleObjectsError when the placement folder name is ambiguous.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Clone case: reuse the folder that contains the base VM.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if not folder_objects:
            # Previously this fell through to an IndexError below.
            raise salt.exceptions.VMwareObjectRetrievalError(
                'The specified folder {0} cannot be found'.format(placement['folder']))
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously folder_object was left unbound here, producing a
        # NameError on return; raise an explicit retrieval error instead.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unable to determine a folder: no base VM, placement folder or '
            'datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info: cluster, host or resource pool name

    return
        Tuple of (resource pool object, placement object) where the placement
        object is the host or cluster that was resolved (None for the bare
        resource pool case).

    Raises VMwareObjectRetrievalError when the requested placement cannot be
    resolved and VMwareMultipleObjectsError when it is ambiguous.
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if placement is None:
        # Avoid a TypeError on the membership tests below; the final else
        # branch reports the missing placement explicitly.
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose resourcePool directly; clustered hosts
            # require traversing to the cluster's resource pool.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: the original message referenced placement['host'], which
            # does not exist in this branch (KeyError while raising).
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit.

    unit
        Unit of the size: 'GB', 'MB' or 'KB' (case insensitive).
        Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    Returns a dict ``{'size': <int size in KB>, 'unit': 'KB'}``
    (the original docstring incorrectly claimed a bare integer).

    Raises ArgumentValueError for an unsupported unit.
    '''
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    try:
        multiplier = multipliers[unit.lower()]
    except KeyError:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    # vCenter needs long value
    return {'size': int(size * multiplier), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine and waits for the power task to finish.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine; only 'on' and 'off'
        are supported.

    Returns the same virtual machine object on success.

    Raises ArgumentValueError for an unsupported action, VMwareApiError /
    VMwareRuntimeError on API faults, and VMwarePowerOnError when the task
    fails because a file was missing.
    '''
    if action == 'on':
        try:
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    elif action == 'off':
        try:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # A missing VM file during the power task is surfaced as a
        # power-specific error (used for both on and off actions).
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will ne placed (optional)

    return
        Virtual Machine managed object reference

    Raises VMwareApiError / VMwareRuntimeError on API faults.
    '''
    try:
        # The host argument is only passed when a concrete vim.HostSystem
        # was supplied; otherwise vCenter picks a host from the pool.
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll every 10s and log progress at info level for this long operation.
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)

    Raises VMwareApiError / VMwareRuntimeError on API faults and
    VMwareVmRegisterError when the vmx file cannot be found.
    '''
    try:
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # Translate a missing-vmx-file fault into a registration error.
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update

    Returns the result of the reconfiguration task.

    Raises VMwareApiError / VMwareRuntimeError on API faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # NOTE: vm_ref is rebound to the task result here, not the input ref.
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine (removes it from the inventory and deletes
    its files from disk).

    vm_ref
        Managed object reference of a virtual machine object

    Raises VMwareApiError / VMwareRuntimeError on API faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory; unlike delete_vm the
    VM's files are left on the datastore. (The original docstring incorrectly
    said "Destroys".)

    vm_ref
        Managed object reference of a virtual machine object

    Raises VMwareApiError / VMwareRuntimeError on API faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # log.exception added for consistency with every sibling handler.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_assigned_licenses
|
python
|
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ArgumentValueError when entity_name is missing, VMwareApiError /
    VMwareRuntimeError on API faults, and VMwareObjectRetrievalError when the
    assignment result is inconsistent.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # vCenter case: identify the entity by its instance UUID and verify
        # the returned assignment's display name against entity_name below.
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid) query is expected to yield exactly one assignment.
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
|
Returns the licenses assigned to an entity. If entity ref is not provided,
then entity_name is assumed to be the vcenter. This is later checked if
the entity name is provided.
service_instance
The Service Instance Object from which to obtain the licenses.
entity_ref
VMware entity to get the assigned licenses for.
If None, the entity is the vCenter itself.
Default is None.
entity_name
Entity name used in logging.
Default is None.
license_assignment_manager
The LicenseAssignmentManager object of the service instance.
If not provided it will be retrieved.
Default is None.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1573-L1655
|
[
"def get_license_assignment_manager(service_instance):\n '''\n Returns the license assignment manager.\n\n service_instance\n The Service Instance Object from which to obrain the license manager.\n '''\n\n log.debug('Retrieving license assignment manager')\n try:\n lic_assignment_manager = \\\n service_instance.content.licenseManager.licenseAssignmentManager\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{0}'.format(exc.privilegeId))\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n if not lic_assignment_manager:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'License assignment manager was not retrieved')\n return lic_assignment_manager\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: Dictionary (the ``cmd.run_all`` result: retcode/stdout/stderr),
             or False when the esxcli binary is not installed

    NOTE(review): the password is interpolated into the command line, so it
    is visible in the process list while esxcli runs; output logging is set
    to 'quiet' to at least keep it out of the logs.
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # vCenter case: -h selects the managed ESXi host to run against.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    mechanism is either 'userpass' (username/password required) or 'sspi'
    (principal and domain required, exchanged for a GSSAPI token).

    Raises CommandExecutionError for missing/unsupported credentials and
    VMwareConnectionError when the connection cannot be established.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # NOTE(review): exc.message is Python-2-only, and when the message
        # does not match, this branch falls through without re-raising,
        # leaving service_instance unbound — confirm against upstream.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First fallback: retry with an unverified SSL context when the
            # failure was a certificate verification error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            # Second fallback: explicit TLSv1 context with verification off.
            if 'certificate verify failed' in six.text_type(exc):
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is closed on interpreter exit.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Return a reference to the named VMware customization spec, used when
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Return a reference to the first inventory object with the specified type
    and name, or None when no match exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    matches = (entry for entry in container.view if entry.name == obj_name)
    return next(matches, None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``

    A cached connection is reused when it targets the same host:port and the
    session is still authenticated; otherwise a fresh one is established.
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Session expired: drop it and authenticate from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    # stub.host is '<hostname>:<port>'; keep only the hostname part
    hostname = stub.host.split(':')[0]
    # The session token is the quoted portion of the cookie header
    session_cookie = stub.cookie.split('"')[1]
    # Propagate the live session to the new endpoint so the new stub does
    # not need to re-authenticate
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only for logging. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a fresh ServiceInstance that shares the managed object's stub,
    # i.e. the same live connection/session.
    service_instance = vim.ServiceInstance('ServiceInstance')
    service_instance._stub = mo_ref._stub
    return service_instance
def disconnect(service_instance):
    '''
    Disconnect from the vCenter server or ESXi host.

    service_instance
        The Service Instance to disconnect from.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        msg = 'Not enough permissions. Required privilege: {}'.format(exc.privilegeId)
        raise salt.exceptions.VMwareApiError(msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Return True when the connection is made to a vCenter Server and False
    when it is made to an ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises VMwareApiError for any other api type.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        msg = 'Not enough permissions. Required privilege: {}'.format(exc.privilegeId)
        raise salt.exceptions.VMwareApiError(msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Return the 'about' information of the vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        msg = 'Not enough permissions. Required privilege: {0}'.format(exc.privilegeId)
        raise salt.exceptions.VMwareApiError(msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None if no DVS with that name exists
    '''
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(
            inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        try:
            for item in container.view:
                if item.name == dvs_name:
                    return item
        finally:
            # Destroy the container view to free its server-side resources;
            # the previous implementation leaked the view on every call.
            container.Destroy()
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Raises ImportError when the gssapi library is unavailable and
    CommandExecutionError when no token could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    # NOTE(review): in_token is never updated inside the loop, so the context
    # is only ever stepped with None (a single initial exchange) -- confirm
    # that multi-step negotiation is intentionally unsupported here.
    in_token = None
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            # Return the first outgoing token, base64-encoded; on PY3 the
            # token must be converted to bytes before b64encode.
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # No token was produced and the context isn't established; since
        # in_token is always None at this point, this branch always raises.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Hardware grains are only collected when connected directly to an ESXi
    # host (apiType 'HostAgent'), not when connected to a vCenter.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is reported in bytes; convert to MiB
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Collect per-VMkernel-nic addressing information
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host + '.' + domain, but the dot is omitted when no
            # domain is configured
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NIC MAC addresses
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # NOTE(review): the container view is only dereferenced here, never
        # Destroy()ed -- the server-side view object appears to be leaked.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Return the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    try:
        log.trace('Retrieving root folder')
        content = service_instance.RetrieveContent()
        return content.rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        msg = 'Not enough permissions. Required privilege: {}'.format(exc.privilegeId)
        raise salt.exceptions.VMwareApiError(msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the
    # filter is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view (only if we created it locally above)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match against (a matching object id
        string is also accepted).

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Fetch all candidate objects together with the property we filter on
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # An object also matches when property_value equals its stringified id
        candidate_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == candidate_id:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # The connection dropped mid-request; retry once
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        # Broken pipe: the server closed the connection; retry once
        content = get_content(*content_args, **content_kwargs)
    # Flatten each result's propSet into a plain dict; keep the managed
    # object itself under the 'object' key
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises a VMwareApiError when the properties could not be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First retrieve the object's name (used only for logging/error text);
    # fall back to a placeholder when the 'name' property is not supported
    # by this object type.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Return the name of a managed object, or None when no name was found.

    mo_ref
        The managed object reference.
    '''
    properties = get_properties_of_managed_object(mo_ref, ['name'])
    return properties.get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new network adapter device object for the given adapter type.

    adapter_type
        The adapter type name from which to obtain the network adapter type.

    Raises ValueError for an unknown adapter type name.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Return the network adapter type name for an adapter device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ValueError for an unknown adapter object type.
    '''
    # Order is significant and mirrors the original if-chain: the more
    # specific classes are tested before vim.vm.device.VirtualVmxnet /
    # VirtualE1000 so a subclass instance is not misreported as its base.
    checks = [
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    ]
    for adapter_class, type_name in checks:
        if isinstance(adapter_object, adapter_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs
    child_entity_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_entity_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Retrieve the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises VMwareObjectRetrievalError when the folder cannot be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Traverse directly from the datacenter to its networkFolder property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Create a distributed virtual switch (DVS) in a datacenter and wait for
    the creation task to complete.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a bare spec is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
    # The config spec's name always reflects the requested DVS name
    dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        msg = 'Not enough permissions. Required privilege: {0}'.format(exc.privilegeId)
        raise salt.exceptions.VMwareApiError(msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Reconfigure a distributed virtual switch with the given config spec and
    wait for the task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        msg = 'Not enough permissions. Required privilege: {0}'.format(exc.privilegeId)
        raise salt.exceptions.VMwareApiError(msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enable or disable network I/O control (NIOC) on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        msg = 'Not enough permissions. Required privilege: {0}'.format(exc.privilegeId)
        raise salt.exceptions.VMwareApiError(msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises an ArgumentValueError when the parent is neither a datacenter nor
    a distributed virtual switch.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    if isinstance(parent_ref, vim.Datacenter):
        # Traverse datacenter -> networkFolder -> childEntity to reach the
        # portgroups
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        # Traverse the dvs' own 'portgroup' property
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Return the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference

    Raises VMwareObjectRetrievalError when no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # Uplink portgroups carry the 'SYSTEM/DVS.UPLINKPG' tag
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Create a distributed virtual portgroup on a distributed virtual switch
    (dvs) and wait for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        msg = 'Not enough permissions. Required privilege: {0}'.format(exc.privilegeId)
        raise salt.exceptions.VMwareApiError(msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the reconfigure
    task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message ('portgrouo' -> 'portgroup')
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Destroy a distributed virtual portgroup and wait for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        msg = 'Not enough permissions. Required privilege: {0}'.format(exc.privilegeId)
        raise salt.exceptions.VMwareApiError(msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.

    Raises an ArgumentValueError when the parent is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to reach networks
    child_entity_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_entity_spec])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.
    service_instance
        The Service Instance for which to obtain a list of objects.
    vim_object
        The managed object type to enumerate (e.g. vim.Datacenter).
    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``['name']``.
    '''
    if properties is None:
        properties = ['name']
    found = get_mors_with_properties(service_instance, vim_object, properties)
    return [entry['name'] for entry in found]
def get_license_manager(service_instance):
    '''
    Returns the license manager of a vCenter/ESXi service instance.
    service_instance
        The Service Instance Object from which to obtain the license manager.
    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager of a service instance.
    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.
    Raises VMwareApiError on permission/API faults, VMwareRuntimeError on
    vmodl runtime faults, and VMwareObjectRetrievalError if the manager
    could not be retrieved.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.
    service_instance
        The Service Instance Object from which to obtain the licenses.
    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license and returns the created vim.LicenseManagerLicenseInfo.
    service_instance
        The Service Instance Object.
    key
        The key of the license to add.
    description
        The description of the license to add; stored as the
        'VpxClientLicenseLabel' label so it shows up in the vSphere client.
    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the resulting license info.
    service_instance
        The Service Instance Object from which to obtain the licenses.
    license_key
        The key of the license to add.
    license_name
        The description of the license to add.
    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.
    entity_name
        Entity name used in logging.
        Default is None.
    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # No entity given: assign to the vCenter itself, identified by its
        # instance UUID.
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before translating, consistent with the other fault
            # handlers in this module.
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host; use the managed object id
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns the names of all datacenters associated with a given service
    instance.
    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    # Delegate to the generic object lister with the datacenter type
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns datacenter managed objects in a vCenter, optionally filtered
    by name.
    service_instance
        The Service Instance Object from which to obtain the datacenters.
    datacenter_names
        List of datacenter names to filter by. Default value is None.
    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    found = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            found.append(entry['object'])
    return found
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object with the given name.
    service_instance
        The Service Instance Object from which to obtain the datacenter.
    datacenter_name
        The datacenter name.
    Raises VMwareObjectRetrievalError if no datacenter with that name exists.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if matches:
        return matches[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder and returns its reference.
    .. versionadded:: 2017.7.0
    service_instance
        The Service Instance Object
    datacenter_name
        The datacenter name
    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster (vim.ClusterComputeResource) in a datacenter.
    dc_ref
        The datacenter reference
    cluster
        The name of the cluster to be retrieved
    Raises VMwareObjectRetrievalError if the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's hostFolder; traverse one level
    # of child entities under that folder.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.
    dc_ref
        The parent datacenter reference.
    cluster_name
        The cluster name.
    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster's configuration; blocks until the reconfigure task
    completes.
    cluster_ref
        The cluster reference.
    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx) with the incremental
        changes to apply (``modify=True``).
    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns the names of all clusters associated with a given service
    instance.
    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    # Delegate to the generic object lister with the cluster type
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns the names of all datastore clusters associated with a given
    service instance.
    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    # Datastore clusters are represented as vim.StoragePod objects
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns the names of all datastores associated with a given service
    instance.
    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # Delegate to the generic object lister with the datastore type
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns detailed information for all datastores of a service instance.
    The result maps each datastore name to a dict with:
    name, type, url, capacity, free, used, usage, hosts
    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity (MiB), free (MiB), used (MiB),
    usage (percent), hosts (list of host names).
    service_instance
        The Service Instance Object from which to obtain datastores.
    datastore
        Name of the datastore.
    Raises VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    # str(...).replace("'", "") strips the quote characters pyVmomi string
    # wrappers include in their repr
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Convert bytes to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # NOTE(review): assumes capacity is non-zero; a 0-capacity datastore
    # would raise ZeroDivisionError here
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key repr looks like "'vim.HostSystem:host-123'"; keep the
        # part after the colon as the managed object id
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name;
    returns None if no object matches.
    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)
    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)
    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # is not leaked on the vCenter/ESXi side.
        container.Destroy()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and managed object
    id; returns None if no object matches.
    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)
    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)
    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # is not leaked on the vCenter/ESXi side.
        container.Destroy()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.
    service_instance
        The Service Instance Object from which to obtain datastores.
    directory
        The name of the directory where we would like to search
    datastores
        Names of the datastores to search
    container_object
        The base object for datastore retrieval (see get_datastores)
    browser_spec
        vim.HostDatastoreBrowserSearchSpec object which defines the search
        criteria
    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Deliberate best-effort: a datastore without the directory is
            # simply skipped instead of failing the whole search
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses
    service_instance
        The Service Instance Object from which to obtain datastores.
    reference
        The VMware object from which the datastores are visible. Supported
        types: vim.HostSystem, vim.ClusterComputeResource, vim.Datacenter,
        vim.StoragePod and the root 'Datacenters' vim.Folder.
    datastore_names
        The list of datastore names to be retrieved. Default value is None.
        The caller's list is never mutated.
    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None
    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    Raises ArgumentValueError for unsupported reference types.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # Work on a copy so the caller's list argument isn't mutated
            datastore_names = list(datastore_names) + disk_datastores
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Pick a traversal spec appropriate to the reference type
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.
    datastore_ref
        vim.Datastore reference to the datastore object to be changed
    new_datastore_name
        New datastore name
    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).
    service_instance
        The Service Instance Object from which to obtain the storage system.
    host_ref
        The vim.HostSystem whose storage system is retrieved.
    hostname
        Name of the host, used for logging/errors. If not provided it is
        looked up from host_ref.
    Raises VMwareObjectRetrievalError if the storage system could not be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # The storage system hangs off the host's configManager
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo.
    storage_system
        The host's vim.HostStorageSystem.
    device_path
        The device path whose partition info is retrieved.
    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    try:
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A single device path was passed in, so the result list has one entry
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec).
    storage_system
        The host's vim.HostStorageSystem.
    device_path
        Path of the disk device.
    partition_info
        The disk's current vim.HostDiskPartitionInfo.
    Raises VMwareObjectNotFoundError if the disk has no free partition,
    VMwareNotFoundError if the new partition can't be identified in the
    computed layout, VMwareApiError on permission/API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use %s placeholders: logging's lazy formatting is %-style, so a
    # '{0}' placeholder would never be interpolated
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk and returns the new vim.Datastore
    reference. The datastore occupies the disk's free space (a single new
    partition at the end of the disk).
    host_ref
        vim.HostSystem object referencing a host to create the datastore on
    datastore_name
        Name of the datastore
    disk_ref
        vim.HostScsiDisk on which the datastore is created
    vmfs_major_version
        VMFS major version to use
    storage_system
        The host's vim.HostStorageSystem. If not provided it will be
        retrieved.
    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute the spec for a new vmfs partition filling the free space
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).
    host_ref
        Reference to the ESXi host
    hostname
        Name of the host. This argument is optional.
    Raises VMwareObjectRetrievalError if the datastore system could not be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # The datastore system hangs off the host's configManager
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The removal is performed through the datastore
    system of the first host the datastore is attached to.
    service_instance
        The Service Instance Object containing the datastore
    datastore_ref
        The reference to the datastore to remove
    Raises VMwareApiError if the datastore has no attached hosts or on
    permission/API faults, and VMwareRuntimeError on vmodl runtime faults.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host can perform the removal; use the first one
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.
    service_instance
        The Service Instance Object from which to obtain the hosts.
    datacenter_name
        The datacenter name. Default is None.
    host_names
        The host_names to be retrieved. Default is None.
    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.
    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    Raises ArgumentValueError if cluster_name is given without
    datacenter_name.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            # Standalone hosts have a ComputeResource parent, not a cluster
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
    map[<scsi_address>] = <lun key>
    service_instance
        The Service Instance Object from which to obtain the hosts
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    storage_system
        The host's storage system. Default is None.
    hostname
        Name of the host. Default is None.
    Raises VMwareObjectRetrievalError if storage device, multipath or lun
    info is missing; VMwareApiError/VMwareRuntimeError on API faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all scsi luns (vim.ScsiLun objects) on an ESXi host.
    Returns an empty list if the host reports no luns.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None; retrieved from the host
        if not provided.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    # Translate pyVmomi faults into salt's VMware exceptions.
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # First map each scsi address to the key of the lun behind it, then
    # resolve every key to its vim.ScsiLun object.
    lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        si, host_ref, storage_system, hostname)
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    scsi_addr_to_lun = {}
    for scsi_addr, lun_key in six.iteritems(lun_key_by_scsi_addr):
        scsi_addr_to_lun[scsi_addr] = lun_by_key[lun_key]
    return scsi_addr_to_lun
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False. When True, ``disk_ids`` and
        ``scsi_addresses`` are ignored.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Nothing requested and not asking for everything -> nothing to do
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    # A lun is selected if it is a disk and matches either of the filters
    # (or everything is requested).
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk
    (a vim.HostDiskPartitionInfo object for the disk's device path).

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None.

    raises
        salt.exceptions.VMwareObjectRetrievalError if no devices are found
        on the host or the given disk id is not among them.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Only actual disks (vim.HostScsiDisk) are considered; other lun types
    # are skipped.
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.

    raises
        salt.exceptions.VMwareObjectRetrievalError if the host's devices or
        the given disk cannot be found; VMwareApiError / VMwareRuntimeError
        on vCenter faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # Fetch the scsi luns through the host's storage system so the property
    # collector traversal starts at the host object.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disk
    groups in a ESXi host, filtered by the cannonical names of their cache
    disks.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            return []
    # Translate pyVmomi faults into salt's VMware exceptions.
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by the canonical name of its (single) ssd
    # cache disk.
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Validates that a disk group contains exactly the expected cache disk and
    capacity disks; raises ArgumentValueError when the check fails, returns
    True when it passes.
    '''
    ssd_id = disk_group.ssd.canonicalName
    if ssd_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(ssd_id, cache_disk_id))
    actual_capacity = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity = sorted(capacity_disk_ids)
    if actual_capacity != expected_capacity:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity, expected_capacity))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the host cache configuration info
    (vim.HostCacheConfigurationInfo) if the host cache is configured on the
    specified host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Retrieve the cache manager's info through the property collector,
        # traversing from the host to its configManager.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # NOTE: only the first configured datastore is returned (see TODO)
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host. Returns True on
    success.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache
        will be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    # Delegate to the generic lister for vim.HostSystem objects.
    hosts = list_objects(service_instance, vim.HostSystem)
    return hosts
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        List of resource pool names to retrieve

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; when True all resource pools in the container are returned
        and ``resource_pool_names`` is ignored

    return
        List of vim.ResourcePool managed object references

    raises
        salt.exceptions.VMwareObjectRetrievalError when no resource pool
        matches the request
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search under the datacenter when one is given, otherwise under the
    # vCenter root folder.
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Bug fix: this message used to format ``selected_pools`` (always []
        # at this point) instead of the names that were actually requested.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    # Delegate to the generic lister for vim.ResourcePool objects.
    pools = list_objects(service_instance, vim.ResourcePool)
    return pools
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    # Delegate to the generic lister for vim.Network objects.
    networks = list_objects(service_instance, vim.Network)
    return networks
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    # Delegate to the generic lister for vim.VirtualMachine objects.
    virtual_machines = list_objects(service_instance, vim.VirtualMachine)
    return virtual_machines
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    # Delegate to the generic lister for vim.Folder objects.
    folders = list_objects(service_instance, vim.Folder)
    return folders
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    # Delegate to the generic lister for vim.DistributedVirtualSwitch objects.
    switches = list_objects(service_instance, vim.DistributedVirtualSwitch)
    return switches
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    # Delegate to the generic lister for vim.VirtualApp objects.
    vapps = list_objects(service_instance, vim.VirtualApp)
    return vapps
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    # Delegate to the generic lister for distributed virtual portgroups.
    portgroups = list_objects(service_instance,
                              vim.dvs.DistributedVirtualPortgroup)
    return portgroups
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed and returns the task result on success;
    on failure the task's error is re-raised translated into the matching
    salt VMware exception.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        NOTE(review): despite the name, this only throttles how often the
        "Waiting for ..." message is logged (every ``sleep_seconds``
        iterations); the task is polled roughly once per second regardless.
        Defaults to ``1``.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Reading task.info can itself fault; translate to salt exceptions.
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep to the next whole-second boundary relative to start_time
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault so it can be
        # translated into the matching salt exception below
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    raises
        salt.exceptions.VMwareObjectRetrievalError when no VM matches the
        name; salt.exceptions.VMwareMultipleObjectsError when more than one
        VM matches.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set commonly needed by the VM execution modules
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Bug fix: the two adjacent string literals (missing comma inside the
        # joined list) used to concatenate into '...with thesame name...'.
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the same name, '
            'please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns the vim.Folder object a virtual machine should be placed in.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    raises
        salt.exceptions.VMwareObjectRetrievalError when no folder can be
        determined from the given arguments;
        salt.exceptions.VMwareMultipleObjectsError when the placement folder
        name is ambiguous.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Clone case: reuse the parent folder of the base virtual machine
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Bug fix: previously this case fell through to the return below and
        # crashed with an UnboundLocalError on folder_object; raise a
        # meaningful error instead.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unable to retrieve folder: no base VM, placement folder or '
            'datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object used to query the vCenter

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name.
        NOTE: despite the ``None`` default, a dict is required; the
        membership tests below raise TypeError on None.

    return
        Tuple of (resource pool object, placement object) where the
        placement object is the host or cluster, if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The host does not expose a resourcePool property directly;
            # traverse up to its compute resource to find the pool.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Bug fix: this message used to format placement['host'], which
            # raises KeyError in this branch and masked the real error.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit.

    unit
        Unit of the size: 'GB', 'MB' or 'KB' (case insensitive).
        Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    return
        Dict with the converted integer size, e.g.
        ``{'size': 1048576, 'unit': 'KB'}``

    raises
        salt.exceptions.ArgumentValueError if the unit is not supported
    '''
    # Normalize once instead of calling unit.lower() per branch
    unit_lower = unit.lower()
    if unit_lower == 'gb':
        # vCenter needs long value
        target_size = int(size * 1024 * 1024)
    elif unit_lower == 'mb':
        target_size = int(size * 1024)
    elif unit_lower == 'kb':
        target_size = int(size)
    else:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': target_size, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine and waits for the power task to
    complete; returns the virtual machine object.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine; one of 'on' or 'off'

    raises
        salt.exceptions.ArgumentValueError for an unsupported action;
        salt.exceptions.VMwareApiError / VMwareRuntimeError on vCenter
        faults; salt.exceptions.VMwarePowerOnError when the power task
        fails because a file was not found.
    '''
    if action == 'on':
        try:
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    elif action == 'off':
        try:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        # A missing file (e.g. missing vmdk) surfaces as FileNotFound from
        # the task and is re-raised as a power operation error.
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    try:
        # Only pass the host when a real vim.HostSystem was provided
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Creation can take a while; log progress at info level every 10s
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object
        (optional)
    '''
    try:
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # A missing vmx file surfaces as FileNotFound from the task
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Applies a config spec to an existing virtual machine and waits for the
    reconfiguration task to finish; returns the task result.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(reconfig_task, name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroy the virtual machine (delete it from the inventory and from disk),
    waiting for the destroy task to finish.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory.

    Unlike ``delete_vm`` this does NOT delete the VM's files from disk; it
    only removes the VM from the vCenter/ESXi inventory.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed: docstring and trace message were copy-pasted from delete_vm and
    # incorrectly said "Destroying"; UnregisterVM only removes the VM from
    # the inventory.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating the fault, consistent with the other
        # exception-wrapping helpers in this module.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
assign_license
|
python
|
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # No entity given: the license is assigned to the vCenter itself,
        # identified by its instance UUID.
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Fixed: these two branches previously re-raised without logging,
            # unlike every other fault handler in this module.
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
|
Assigns a license to an entity.
service_instance
The Service Instance Object from which to obtain the licenses.
license_key
The key of the license to add.
license_name
The description of the license to add.
entity_ref
VMware entity to assign the license to.
If None, the entity is the vCenter itself.
Default is None.
entity_name
Entity name used in logging.
Default is None.
license_assignment_manager
The LicenseAssignmentManager object of the service instance.
If not provided it will be retrieved
Default is None.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1658-L1728
|
[
"def get_license_assignment_manager(service_instance):\n '''\n Returns the license assignment manager.\n\n service_instance\n The Service Instance Object from which to obrain the license manager.\n '''\n\n log.debug('Retrieving license assignment manager')\n try:\n lic_assignment_manager = \\\n service_instance.content.licenseManager.licenseAssignmentManager\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{0}'.format(exc.privilegeId))\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n if not lic_assignment_manager:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'License assignment manager was not retrieved')\n return lic_assignment_manager\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary
    '''
    # Locate the esxcli binary on PATH; without it this function is unusable.
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    # NOTE(review): user, password, host and cmd are interpolated directly
    # into a shell command string. A password containing a single quote will
    # break the quoting, and this construction is not injection-safe for
    # untrusted input -- consider shlex.quote() or an argument list.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # 'host' is a vCenter server; '-h' targets the specific ESXi host
        # on which the command should run.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    # output_loglevel='quiet' -- presumably to keep the credentials embedded
    # in the command/output out of the logs.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        Location of the vCenter/ESXi endpoint.

    username / password
        Credentials; both are mandatory for the ``userpass`` mechanism.

    protocol / port
        Connection protocol and TCP port.

    mechanism
        Either ``userpass`` or ``sspi``.

    principal / domain
        Kerberos service principal and user domain; both are mandatory for
        the ``sspi`` mechanism.

    Raises ``CommandExecutionError`` on missing/invalid parameters and
    ``VMwareConnectionError`` when the connection cannot be established.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Fixed: this handler used ``exc.message``, which does not exist on
        # Python 3 and would itself raise. Use the string form instead.
        if 'unexpected keyword argument' in six.text_type(exc):
            log.error('Initial connect to the VMware endpoint failed with %s',
                      six.text_type(exc))
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
        # Fixed: previously a TypeError whose message did not match was
        # silently swallowed, leaving service_instance undefined and causing
        # a confusing NameError below. Always re-raise.
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)

        try:
            # First fallback: retry with certificate verification disabled
            # when the failure was an SSL certificate verification error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            # Second fallback: some stacks report 'certificate verify failed'
            # differently; retry once more with an explicit TLSv1 context.
            if 'certificate verify failed' in six.text_type(exc):
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is cleaned up when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Fixed: the result was previously assigned back to the
    # ``customization_spec_name`` parameter, shadowing the string argument
    # with a spec object and obscuring intent.
    customization_spec = si.content.customizationSpecManager.GetCustomizationSpec(
        name=customization_spec_name)
    return customization_spec
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get a reference to the first inventory object of the specified type with
    the specified name, or None if no match exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    # Recursive container view over the whole inventory tree.
    view = inventory.viewManager.CreateContainerView(inventory.rootFolder,
                                                     [obj_type], True)
    return next((item for item in view.view if item.name == obj_name), None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Reuse pyVim's cached (module-global) service instance when possible.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        # Cheap round-trip call used purely as a liveness/auth probe.
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Build a new SOAP stub pointing at a different path, reusing the session
    of an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub. Default value is None.

    version
        Version of the new stub. Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    ssl_context = None
    if sys.version_info[:3] > (2, 7, 8):
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE

    old_stub = service_instance._stub
    hostname = old_stub.host.split(':')[0]
    # Extract the quoted session cookie value and propagate it so the new
    # stub shares the authenticated session.
    session_cookie = old_stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=ssl_context)
    new_stub.cookie = old_stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieve the service instance backing a managed object by reusing the
    managed object's SOAP stub.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    service_instance = vim.ServiceInstance('ServiceInstance')
    service_instance._stub = mo_ref._stub
    return service_instance
def disconnect(service_instance):
    '''
    Close the connection to the vCenter server or ESXi host, translating
    pyVmomi faults into salt exceptions.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Return ``True`` when the connection is made to a vCenter Server and
    ``False`` when the connection is made to an ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' identifies a vCenter server, 'HostAgent' an ESXi host.
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Return the ``AboutInfo`` of the vCenter or ESXi host backing the
    connection.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        about = service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return about
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object, or None if no
    DVS with that name exists.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object
    '''
    if dvs_name in list_dvs(service_instance):
        inventory = get_inventory(service_instance)
        view = inventory.viewManager.CreateContainerView(
            inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        for dvs in view.view:
            if dvs.name == dvs_name:
                return dvs
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    Raises ``ImportError`` when the ``gssapi`` library is unavailable and
    ``CommandExecutionError`` when no token can be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    # Service principal in GSSAPI form: principal/host@DOMAIN
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # Advance the GSSAPI handshake; the first token produced is
        # base64-encoded and returned immediately.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): in_token is never reassigned inside this loop, so
        # this branch fires whenever step() yields no token on the first
        # pass -- confirm this single-step handshake is the intended flow.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    Returns an empty dict when the connection is not to an ESXi host
    (i.e. the apiType is not 'HostAgent').

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        # Recursive container view over all HostSystem objects; on an ESXi
        # host connection, view.view[0] is the host itself.
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grain is reported in MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # VMkernel NICs: collect per-device IPv4/IPv6 addresses and MACs.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn = host[.domain]; the dot is omitted when domain is empty.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NICs: record MAC addresses only.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the local reference to the container view.
        # NOTE(review): the view is not explicitly destroyed via DestroyView()
        # -- confirm whether the server-side view should be cleaned up.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (service content) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Return the root folder of a vCenter's inventory.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    log.trace('Retrieving root folder')
    try:
        content = service_instance.RetrieveContent()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content.rootFolder
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.

    Raises ``VMwareApiError``/``VMwareRuntimeError`` when the underlying
    PropertyCollector or view calls fault.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        # skip=True excludes the starting object itself from the results
        # unless local properties were requested.
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view (only when we created it above ourselves;
    # caller-supplied container refs are left untouched)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose ``property_name`` value
    (or object id string) matches ``property_value``, or None.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match against.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. If not
        specified, the search defaults to the inventory rootFolder.
    '''
    candidates = get_mors_with_properties(service_instance, object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        obj_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == obj_id:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.

    Returns a list of dicts, one per retrieved object: each maps property
    name to value and adds an ``object`` key holding the managed object
    reference itself.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # Stale/broken HTTP connection: retry the call once.
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        # Retry once on a broken pipe; any other IOError propagates.
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        # Keep the mor itself alongside its properties.
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved optimally
    (a single local property-collector call per lookup).

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises VMwareApiError if no properties could be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        # Not every managed object type has a 'name' property; the
        # placeholder is only used for logging and error messages below.
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the ``name`` property of a managed object, or None when the
    property is absent.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Returns a new virtual network adapter device object for the given
    adapter type string.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or 'e1000e'.

    Raises ValueError for any other value.
    '''
    # Dispatch table replaces the if/elif chain; semantics are identical.
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    adapter_cls = adapter_classes.get(adapter_type)
    if adapter_cls is None:
        raise ValueError('An unknown network adapter object type name.')
    return adapter_cls()
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type string for an adapter device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ValueError when the object is none of the known adapter types.
    '''
    # NOTE(review): the vmxnet2/vmxnet3 checks run before the plain vmxnet
    # check, and e1000e before e1000 — presumably because the former are
    # subclasses of the latter in pyVmomi; do not reorder without confirming.
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2):
        return 'vmxnet2'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3):
        return 'vmxnet3'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet):
        return 'vmxnet'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000e):
        return 'e1000e'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000):
        return 'e1000'
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # DVSs live under the datacenter's network folder: step into the
    # folder (skipped itself) and then into its children.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Returns the vim.Folder managed object reference.

    Raises VMwareObjectRetrievalError if the folder can't be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Follow the 'networkFolder' property of the datacenter directly.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and blocks
    until the creation task completes. Returns None (the created DVS
    reference is not returned).

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the
        name is built.

    Raises VMwareApiError / VMwareRuntimeError on vSphere faults.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and blocks
    until the reconfigure task completes.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.

    Raises VMwareApiError / VMwareRuntimeError on vSphere faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (network I/O control) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.

    Raises VMwareApiError / VMwareRuntimeError on vSphere faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises ArgumentValueError for any other parent type.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    # The traversal depends on the parent type: a datacenter reaches its
    # portgroups through the network folder, a dvs directly through its
    # 'portgroup' property.
    if isinstance(parent_ref, vim.Datacenter):
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference.

    Raises VMwareObjectRetrievalError if no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by its 'SYSTEM/DVS.UPLINKPG' tag.
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and blocks until the creation task completes.

    dvs_ref
        The dvs reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec).

    Raises VMwareApiError / VMwareRuntimeError on vSphere faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and blocks until the
    reconfigure task completes.

    portgroup_ref
        The portgroup reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec) to apply.

    Raises VMwareApiError / VMwareRuntimeError on vSphere faults.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and blocks until the destroy
    task completes.

    portgroup_ref
        The portgroup reference.

    Raises VMwareApiError / VMwareRuntimeError on vSphere faults.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.

    Raises ArgumentValueError when the parent is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks live under the datacenter's network folder; step into the
    # folder (skipped itself) and then into its children.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The vim managed object type for which to obtain information
        (the docstring previously documented a nonexistent ``object_type``
        parameter).

    properties
        An optional list of object properties to retrieve.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Only each object's 'name' is returned, regardless of any additional
    # properties retrieved.
    return [item['name'] for item in
            get_mors_with_properties(service_instance, vim_object, properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises VMwareApiError / VMwareRuntimeError on vSphere faults.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises VMwareApiError / VMwareRuntimeError on vSphere faults, and
    VMwareObjectRetrievalError if no assignment manager is available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Raises VMwareApiError / VMwareRuntimeError on vSphere faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license and returns the resulting license info object.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add (stored as the
        'VpxClientLicenseLabel' label).

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Raises VMwareApiError / VMwareRuntimeError on vSphere faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging; required (an ArgumentValueError is
        raised when missing).
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ArgumentValueError, VMwareApiError, VMwareRuntimeError and
    VMwareObjectRetrievalError.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # NOTE(review): entity_name is always truthy here (checked above),
        # so check_name is effectively always set in this branch.
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            # The vCenter itself is addressed by its instance UUID.
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        # Any other entity is addressed by its moid.
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid) query is expected to yield exactly one assignment.
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        # Guard against querying a different vCenter than the one named.
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance (names only, not managed object references).

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter, optionally filtered by name.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name.

    Raises VMwareObjectRetrievalError when no datacenter matches.
    '''
    found = get_datacenters(service_instance,
                            datacenter_names=[datacenter_name])
    if found:
        return found[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder and returns its
    managed object reference.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name

    Raises VMwareApiError / VMwareRuntimeError on vSphere faults.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference.

    cluster
        The name of the cluster to be retrieved.

    Raises VMwareObjectRetrievalError when the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's host folder; step into the
    # folder (skipped itself) and then into its children.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter. Returns None.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx); required (the previous
        docstring's "Defaults to None" did not match the signature).

    Raises VMwareApiError / VMwareRuntimeError on vSphere faults.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster and blocks until the reconfigure task completes.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx) applied incrementally
        (modify=True); required.

    Raises VMwareApiError / VMwareRuntimeError on vSphere faults.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names associated with a given service
    instance (names only, not managed object references).

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster (storage pod) names associated
    with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastore names associated with a given service
    instance (names only, not managed object references).

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dict mapping each datastore name to its basic information:
    name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {name: list_datastore_full(service_instance, name)
            for name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts.
    Capacity/free/used are reported in MiB; usage is a percentage.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )

    summary = datastore_object.summary
    items = {}
    items['name'] = str(summary.name).replace("'", "")
    items['type'] = str(summary.type).replace("'", "")
    items['url'] = str(summary.url).replace("'", "")
    items['capacity'] = summary.capacity / 1024 / 1024
    items['free'] = summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against zero-capacity datastores (e.g. inaccessible ones),
    # which previously raised ZeroDivisionError.
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0

    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies as "vim.HostSystem:host-123"; keep the moid.
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)

    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the managed object reference, or None when no object matches.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Destroy the container view so it doesn't leak server-side
        # (matching get_content(), which destroys the views it creates).
        container.Destroy()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object

    Returns the managed object reference, or None when no object matches.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Destroy the container view so it doesn't leak server-side
        # (matching get_content(), which destroys the views it creates).
        container.Destroy()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects

    Raises VMwareApiError / VMwareRuntimeError on vSphere faults.
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Search under '[<datastore>] <directory>' on each datastore.
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore without the directory simply contributes nothing.
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.
        NOTE(review): when ``backing_disk_ids`` is also given, this list is
        extended in place with the names of the datastores found on those
        disks, i.e. the caller's list is mutated.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        # Backing-disk filtering requires host-level storage info, so it only
        # makes sense when the reference is an ESXi host.
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:

            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    # Apply the name filter (or keep everything when get_all_datastores).
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises VMwareApiError on API faults (including insufficient privileges)
    and VMwareRuntimeError on runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        # NoPermission must be caught before the more general VimFault below.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Retrieve the vim.HostStorageSystem of an ESXi host.

    service_instance
        The Service Instance Object used to query the host.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host; looked up from ``host_ref`` when omitted.

    Raises VMwareObjectRetrievalError when the storage system cannot be
    retrieved.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem property.
    spec = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.HostSystem,
        path='configManager.storageSystem',
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return results[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The vim.HostStorageSystem to query.

    device_path
        Path of the device whose partition info is retrieved.
    '''
    try:
        # The API takes a list of device paths; we only query one.
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Catches API faults other than NoPermission (handled above).
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    # Single device queried -> single result.
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem used to compute the partition layout.

    device_path
        Path of the device on which the partition is added.

    partition_info
        Current vim.HostDiskPartitionInfo of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Catches API faults other than NoPermission (handled above).
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed: the logger uses %-style lazy formatting, so the previous
    # '{0}' placeholder was never interpolated.
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        # NOTE(review): other code paths raise VMwareObjectNotFoundError;
        # confirm salt.exceptions actually defines VMwareNotFoundError.
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        Optional vim.HostStorageSystem of the host; retrieved when omitted.

    Returns the vim.Datastore reference of the created datastore.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    # Compute a partition spec consuming the remaining free space on the disk.
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Catches API faults other than NoPermission (handled above).
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Retrieve the vim.HostDatastoreSystem of an ESXi host.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host; looked up from ``host_ref`` when omitted.

    Raises VMwareObjectRetrievalError when the datastore system cannot be
    retrieved.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem property.
    spec = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.HostSystem,
        path='configManager.datastoreSystem',
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return results[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore, unmounting it via the datastore system of one of
    its attached hosts.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises VMwareApiError if no host is attached to the datastore or on API
    faults, and VMwareRuntimeError on runtime faults.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # The removal is issued through the first attached host.
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Catches API faults other than NoPermission (handled above).
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Return vim.HostSystem objects for ESXi hosts in a vCenter, filtered by
    name and/or datacenter/cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    properties = ['name']
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # 'parent' is needed below to test cluster membership
            properties.append('parent')
    else:
        # No datacenter given: search from the inventory root folder
        start_point = get_root_folder(service_instance)
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for host in hosts:
        if cluster_name:
            # Keep only hosts whose direct parent is the requested cluster
            parent = host['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host['name'] in host_names:
            filtered_hosts.append(host['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.

        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises VMwareObjectRetrievalError when storage device/multipath info or
    luns cannot be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Catches API faults other than NoPermission (handled above).
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    Returns an empty list when the host reports no scsi luns.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        # Look up the storage system via the host's own service instance.
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Catches API faults other than NoPermission (handled above).
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Return a map of all vim.ScsiLun objects on an ESXi host keyed by their
    scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = storage_system or get_storage_system(si, host_ref,
                                                          hostname)
    lun_key_by_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                       storage_system,
                                                       hostname)
    # Index the luns by their key so the address map can be resolved below.
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    mapping = {}
    for scsi_addr, lun_key in six.iteritems(lun_key_by_addr):
        mapping[scsi_addr] = lun_by_key[lun_key]
    return mapping
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Return vim.HostScsiDisk objects for the disks of an ESXi host, filtered
    by canonical name and/or scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not disk_ids and not scsi_addresses:
            # Nothing to filter on: no disks are requested.
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # Translate the requested scsi addresses into lun keys.
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
            si, host_ref, storage_system, hostname)
        disk_keys = [lun_key for addr, lun_key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    def _wanted(disk):
        # A disk matches when everything is requested, or its canonical name
        # or lun key is among the requested ones.
        if get_all_disks:
            return True
        if disk_ids and disk.canonicalName in disk_ids:
            return True
        return disk.key in disk_keys

    scsi_disks = []
    for lun in get_all_luns(host_ref, storage_system):
        if isinstance(lun, vim.HostScsiDisk) and _wanted(lun):
            scsi_disks.append(lun)
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError when the host's devices cannot be
    retrieved or the disk is not found on the host.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Narrow the lun list down to the disk with the requested canonical name.
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk of an ESXi host by applying an empty
    partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Narrow the lun list down to the disk with the requested canonical name.
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Catches API faults other than NoPermission (handled above).
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their canonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # Nothing to filter on: no disk groups are requested.
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Catches API faults other than NoPermission (handled above).
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by the canonical name of its (single) ssd
    # cache disk.
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verify that a disk group contains exactly the expected cache disk and
    capacity disks.

    Raises ArgumentValueError when either check fails; returns True when
    the disk group matches.
    '''
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    # Compare the capacity disks order-insensitively.
    actual_capacity = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity = sorted(capacity_disk_ids)
    if actual_capacity != expected_capacity:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity, expected_capacity))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)

    # Two retrieval paths: traverse to the cache manager from the host, or
    # query the provided cache manager directly.
    if not host_cache_manager:
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first cache configuration is supported (see TODO above).
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in mebibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method

    Returns True once the configuration task completes.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        # Asynchronous configuration; awaited via wait_for_task below.
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Catches API faults other than NoPermission (handled above).
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Return all host objects known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    target_type = vim.HostSystem
    return list_objects(service_instance, target_type)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean

    return
        Resourcepool managed object reference

    Raises VMwareObjectRetrievalError when no matching resource pool is
    found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search from the datacenter when given, otherwise from the root folder.
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Fixed: the error previously formatted the (necessarily empty)
        # selected_pools list, hiding which names were actually requested.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    pool_list = list_objects(service_instance, vim.ResourcePool)
    return pool_list
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    network_list = list_objects(service_instance, vim.Network)
    return network_list
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vm_list = list_objects(service_instance, vim.VirtualMachine)
    return vm_list
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    folder_list = list_objects(service_instance, vim.Folder)
    return folder_list
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed
        virtual switches.
    '''
    dvs_list = list_objects(service_instance, vim.DistributedVirtualSwitch)
    return dvs_list
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vapp_list = list_objects(service_instance, vim.VirtualApp)
    return vapp_list
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed
        virtual portgroups.
    '''
    portgroup_list = list_objects(service_instance,
                                  vim.dvs.DistributedVirtualPortgroup)
    return portgroup_list
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    Polls ``task.info`` roughly once per second until the task leaves the
    ``running``/``queued`` states. On success the task result is returned;
    on failure the task's error is re-raised and translated into the
    corresponding ``salt.exceptions`` type.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.
        NOTE(review): this value is only used as the modulus that throttles
        the "Waiting for..." log message; the actual poll interval is always
        ~1 second — confirm whether that is the intended behavior.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Reading task.info can itself fail (e.g. expired session, missing
    # privilege); map those faults to salt exceptions up front.
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Log only every `sleep_seconds` iterations to avoid flooding.
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep the remainder of the current wall-clock second so each
        # iteration lines up on ~1 s boundaries relative to start_time.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        # Re-read task state; the same faults can surface on every poll.
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault so the
        # except clauses below can translate it into a salt exception.
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first localized fault message, if any, for context.
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter
    name
        Name of the virtual machine.
    datacenter
        Datacenter name
    vm_properties
        List of vm properties.
    traversal_spec
        Traversal Spec object(s) for searching.
    parent_ref
        Container Reference object for searching under a given object.

    raises
        salt.exceptions.VMwareObjectRetrievalError if no machine matched;
        salt.exceptions.VMwareMultipleObjectsError if several matched.
    '''
    if datacenter and not parent_ref:
        # Scope the search to the datacenter unless an explicit container
        # reference was supplied by the caller.
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Fixed: the two fragments were adjacent string literals with no
        # separator, producing 'with thesame name' in the error message.
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary; may contain a 'folder' key

    base_vm_name
        Existing virtual machine name (for cloning); when given, the parent
        folder of that machine is returned

    raises
        salt.exceptions.VMwareObjectRetrievalError when the folder cannot
        be determined;
        salt.exceptions.VMwareMultipleObjectsError when multiple folders
        match the requested name.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Clone case: reuse the folder of the source virtual machine.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vmFolder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this fell through to a NameError on folder_object;
        # raise a meaningful retrieval error instead.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unable to retrieve folder: a base virtual machine, a placement '
            'folder or a datacenter must be specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose resourcePool directly; hosts inside a
            # cluster do not, so walk up to the cluster's resource pool.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: this message previously referenced placement['host']
            # ("specified host"), which is the wrong key in this branch and
            # could itself raise a KeyError.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to kibibytes and returns a dict with the
    converted integer size and the unit 'KB'.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    normalized_unit = unit.lower()
    if normalized_unit == 'gb':
        # vCenter needs long value
        converted = int(size * 1024 * 1024)
    elif normalized_unit == 'mb':
        converted = int(size * 1024)
    elif normalized_unit == 'kb':
        converted = int(size)
    else:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': converted, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    # Pick the power operation first; the fault handling is identical for
    # both directions.
    if action == 'on':
        power_method = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_method = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_method()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will ne placed (optional)

    return
        Virtual Machine managed object reference
    '''
    create_kwargs = {'pool': resourcepool_object}
    # Pin the VM to a specific host only when a valid HostSystem was given.
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)
    '''
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    # Only specify the placement host when one was given.
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine

    vm_ref
        Managed object reference of a virtual machine object
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory (the virtual
    machine's files are NOT deleted; compare with ``delete_vm``).

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed: the previous docstring and trace message said 'Destroying'
    # (copy/paste from delete_vm) although this only removes the VM from
    # the inventory.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, consistent with the other API wrappers
        # in this module.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_datacenters
|
python
|
def get_datacenters(service_instance, datacenter_names=None,
get_all_datacenters=False):
'''
Returns all datacenters in a vCenter.
service_instance
The Service Instance Object from which to obtain cluster.
datacenter_names
List of datacenter names to filter by. Default value is None.
get_all_datacenters
Flag specifying whether to retrieve all datacenters.
Default value is None.
'''
items = [i['object'] for i in
get_mors_with_properties(service_instance,
vim.Datacenter,
property_list=['name'])
if get_all_datacenters or
(datacenter_names and i['name'] in datacenter_names)]
return items
|
Returns all datacenters in a vCenter.
service_instance
The Service Instance Object from which to obtain cluster.
datacenter_names
List of datacenter names to filter by. Default value is None.
get_all_datacenters
Flag specifying whether to retrieve all datacenters.
Default value is None.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1741-L1762
|
[
"def get_mors_with_properties(service_instance, object_type, property_list=None,\n container_ref=None, traversal_spec=None,\n local_properties=False):\n '''\n Returns a list containing properties and managed object references for the managed object.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n\n object_type\n The type of content for which to obtain managed object references.\n\n property_list\n An optional list of object properties used to return even more filtered managed object reference results.\n\n container_ref\n An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,\n ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory\n rootFolder.\n\n traversal_spec\n An optional TraversalSpec to be used instead of the standard\n ``Traverse All`` spec\n\n local_properties\n Flag specigying whether the properties to be retrieved are local to the\n container. If that is the case, the traversal spec needs to be None.\n '''\n # Get all the content\n content_args = [service_instance, object_type]\n content_kwargs = {'property_list': property_list,\n 'container_ref': container_ref,\n 'traversal_spec': traversal_spec,\n 'local_properties': local_properties}\n try:\n content = get_content(*content_args, **content_kwargs)\n except BadStatusLine:\n content = get_content(*content_args, **content_kwargs)\n except IOError as exc:\n if exc.errno != errno.EPIPE:\n raise exc\n content = get_content(*content_args, **content_kwargs)\n\n object_list = []\n for obj in content:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n properties['object'] = obj.obj\n object_list.append(properties)\n log.trace('Retrieved %s objects', len(object_list))\n return object_list\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param protocol: Connection protocol, defaults to ``https``
    :param port: TCP port, defaults to ``443``
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: Dictionary (output of ``cmd.run_all``), or ``False`` when the
             ``esxcli`` binary is not available
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    # NOTE(review): the password is interpolated into a shell command string;
    # a password containing a single quote would break the quoting. Consider
    # passing an argument list instead of a shell string.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Going through vCenter: -h selects the target ESXi host.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Supports the ``userpass`` and ``sspi`` (GSSAPI token) mechanisms. When
    the first connection attempt fails with an SSL certificate verification
    error, the connection is retried with certificate verification disabled.
    Registers a ``Disconnect`` atexit handler on success.
    '''
    log.trace('Retrieving new service instance')
    token = None
    # Validate the mechanism-specific mandatory parameters up front.
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # NOTE(review): exc.message is a Python 2 idiom; on Python 3 this
        # attribute does not exist — confirm before relying on this branch.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First fallback: retry with an unverified SSL context when the
            # failure was a certificate verification error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Second fallback: explicit TLSv1 context with verification
                # disabled (older endpoints).
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the connection is closed on interpreter exit.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Use a separate local instead of shadowing the parameter with the result.
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name; returns
    None if no object with that name is found.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    # NOTE(review): the container view created here is never destroyed,
    # which may leak a server-side session object — verify.
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    return next((entry for entry in container.view if entry.name == obj_name), None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    Reuses pyVim's cached service instance when it targets the same host and
    port; otherwise (or when the cached session is stale) a new connection
    is established via ``_get_service_instance``.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # GetSi() returns pyVim's process-wide cached service instance (or None).
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        # CurrentTime() is a cheap round-trip that validates the session.
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a SOAP stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance whose connection (host, SSL context, session
        cookie) is reused for the new stub.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    stub = service_instance._stub
    # stub.host is 'host:port'; keep only the hostname part.
    hostname = stub.host.split(':')[0]
    # Extract the session cookie value from the quoted Set-Cookie string.
    session_cookie = stub.cookie.split('"')[1]
    # Propagate the vCenter session cookie so the new stub reuses the
    # already-authenticated session instead of logging in again.
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only for log messages.
        This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a ServiceInstance that shares the managed object's SOAP stub so
    # it communicates over the same authenticated session.
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Disconnects from the vCenter server or ESXi host backing the given
    service instance.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Tells whether the connection target is a vCenter Server (returns True)
    or an ESXi host (returns False).

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    # Anything else is unexpected and treated as an API error.
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns the ``about`` information of the vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None if no switch matches
    '''
    # Bail out early when the name is not among the known switches.
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    for candidate in container.view:
        if candidate.name == dvs_name:
            return candidate
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Returns the first outgoing token produced by the GSSAPI handshake,
    base64-encoded. Raises CommandExecutionError if no token is produced.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            # The first outgoing token is returned immediately for the
            # caller to forward to the server.
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # in_token is never assigned inside this loop, so a step that yields
        # neither an outgoing token nor an established context is an error.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    Returns an empty dict when connected to anything other than an ESXi host
    (HostAgent).

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        # Collect every HostSystem under the root folder; on an ESXi host
        # there is exactly one, hence the view.view[0] accesses below.
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grain is reported in MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # VMkernel (vnic) interfaces: record IPv4 (always) and IPv6
            # (when configured) addresses plus the MAC per device.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host + '.' + domain, omitting the dot when the domain
            # is empty.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NICs only contribute their MAC addresses.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the local reference; note this does not call view.Destroy(),
        # so the server-side container view lives until the session ends.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (service content) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    log.trace('Retrieving root folder')
    try:
        content = service_instance.RetrieveContent()
        return content.rootFolder
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting point for the
    # filter is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view created above (only when this function created
    # it; a caller-supplied traversal spec means no view was created here)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match; the managed object id string is
        also accepted as a match.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    candidates = get_mors_with_properties(service_instance, object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # The stringified MOR looks like 'vim.Type:"id"'; strip the quotes so
        # callers may pass the raw object id as property_value.
        mor_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == mor_id:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    Each entry in the returned list is a dict mapping the requested property
    names to their values, plus an 'object' key holding the managed object
    reference itself.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # A stale HTTP connection can surface as BadStatusLine or a broken
        # pipe; retry the retrieval once for these transient failures.
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)

    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        # Preserve the managed object reference under the 'object' key.
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises VMwareApiError if no properties could be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First fetch just the name so error messages can identify the object;
    # fall back to a placeholder when the object has no 'name' property.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.
    If the name wasn't found, it returns None.

    mo_ref
        The managed object reference.
    '''
    retrieved = get_properties_of_managed_object(mo_ref, ['name'])
    return retrieved.get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device object for the given type
    name.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or 'e1000e'.

    Raises ValueError for any other type name.
    '''
    factories = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    factory = factories.get(adapter_type)
    if factory is None:
        raise ValueError('An unknown network adapter object type name.')
    return factory()
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name for a virtual NIC device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ValueError when the object matches no known adapter class.
    '''
    # Subclasses must be tested before their bases (vmxnet2/vmxnet3 before
    # vmxnet, e1000e before e1000) so the most specific type name wins.
    type_table = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for adapter_class, type_name in type_table:
        if isinstance(adapter_object, adapter_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs.
    folder_children_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_children_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    entries = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualSwitch,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    selected = []
    for entry in entries:
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            selected.append(entry['object'])
    return selected
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    Raises VMwareObjectRetrievalError when the folder cannot be found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Single-hop traversal from the datacenter to its networkFolder.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.Datacenter,
        path='networkFolder',
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete. The new DVS reference is NOT returned;
    the function returns None.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the
        DVS name is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Reconfigures a distributed virtual switch with the given config spec and
    waits for the reconfiguration task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enables or disables network I/O control (NIOC) on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises ArgumentValueError when parent_ref is neither a datacenter nor a
    DVS.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    if isinstance(parent_ref, vim.Datacenter):
        # Datacenter parent: traverse networkFolder -> childEntity to find
        # all portgroups in the datacenter.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        # DVS parent: its 'portgroup' property holds the portgroups directly.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs)

    dvs_ref
        The dvs reference

    Raises VMwareObjectRetrievalError when no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by the system-defined
    # 'SYSTEM/DVS.UPLINKPG' tag on the portgroup.
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Destroys a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.

    Raises ArgumentValueError when parent_ref is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to reach the
    # network objects.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of names of objects from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The managed object type (e.g. ``vim.Datacenter``) for which to
        obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']

    # Each entry from get_mors_with_properties is a dict of the requested
    # properties; only the 'name' value is surfaced here.
    item_list = get_mors_with_properties(service_instance, vim_object, properties)
    return [item['name'] for item in item_list]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager of a vCenter/ESXi instance.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises ``VMwareApiError`` on permission/API faults,
    ``VMwareRuntimeError`` on vmodl runtime faults and
    ``VMwareObjectRetrievalError`` when no assignment manager is returned.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # The API may return None (e.g. on a standalone ESXi host)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses installed on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license to the instance and returns the resulting
    ``vim.LicenseManagerLicenseInfo`` object.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add; stored as the license label
        shown in the vSphere client.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    label = vim.KeyValue()
    # 'VpxClientLicenseLabel' is the label key the vSphere client displays
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ``ArgumentValueError`` when ``entity_name`` is missing,
    ``VMwareApiError``/``VMwareRuntimeError`` on API faults and
    ``VMwareObjectRetrievalError`` when the result is inconsistent.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        # The vCenter itself is addressed by its instance UUID, not a moid
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid lookup) is expected to have exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        # Sanity check: make sure the assignment belongs to the requested
        # vCenter and not to some other instance
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the resulting
    ``vim.LicenseManagerLicenseInfo`` object.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to assign.

    license_name
        The description of the license to assign.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # vcenter - addressed by its instance UUID rather than a moid
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before re-raising, consistent with the module's other
            # exception handlers (previously the traceback was lost here)
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Lists the names of the datacenters associated with a service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    datacenter_names = list_objects(service_instance, vim.Datacenter)
    return datacenter_names
def get_datacenter(service_instance, datacenter_name):
    '''
    Retrieves a single vim.Datacenter managed object by name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name.

    Raises ``VMwareObjectRetrievalError`` when no matching datacenter exists.
    '''
    found = get_datacenters(service_instance,
                            datacenter_names=[datacenter_name])
    if not found:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return found[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder and returns the new
    ``vim.Datacenter`` object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a ``vim.ClusterComputeResource`` with the given name from a
    datacenter.

    dc_ref
        The datacenter reference

    cluster
        The name of the cluster to be retrieved

    Raises ``VMwareObjectRetrievalError`` when the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's hostFolder; traverse
    # datacenter -> hostFolder -> childEntity
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Reconfigures an existing cluster and waits for the task to complete.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx) with the changes to apply.

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Lists the names of the clusters associated with a service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    cluster_names = list_objects(service_instance, vim.ClusterComputeResource)
    return cluster_names
def list_datastore_clusters(service_instance):
    '''
    Lists the names of the datastore clusters (storage pods) associated with
    a service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    pod_names = list_objects(service_instance, vim.StoragePod)
    return pod_names
def list_datastores(service_instance):
    '''
    Lists the names of the datastores associated with a service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastore_names = list_objects(service_instance, vim.Datastore)
    return datastore_names
def list_datastores_full(service_instance):
    '''
    Returns a mapping of datastore name to its basic information for every
    datastore on a service instance. Each entry contains:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts.
    Capacity figures are in MiB; usage is a percentage.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises ``VMwareObjectRetrievalError`` when the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )

    items = {}
    # Strip single quotes that pyVmomi string representations may carry
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against a zero-capacity datastore (e.g. inaccessible or
    # unmounted), which previously raised ZeroDivisionError
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies as "'vim.HostSystem:host-123'"; extract the moid
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)

    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Returns the managed object reference of the given type whose name matches
    ``obj_name``, or None if no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Return the first object with a matching name, if any
    matches = (obj for obj in container.view if obj.name == obj_name)
    return next(matches, None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Returns the managed object reference of the given type whose moid matches
    ``obj_moid``, or None if no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Return the first object with a matching moid, if any
    matches = (obj for obj in container.view if obj._moId == obj_moid)
    return next(matches, None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            # Call wait_for_task directly instead of through the
            # self-referential salt.utils.vmware module path, consistent
            # with the rest of this module (e.g. update_cluster)
            files.append(wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore without the requested directory/files is simply
            # skipped (best-effort aggregation across datastores)
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.
        The caller's list is never modified.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.

    Raises ``ArgumentValueError`` for unsupported reference types.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:

            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # Build a new list instead of extending in place, so the
            # caller's datastore_names argument is not mutated
            datastore_names = datastore_names + disk_datastores
        else:
            datastore_names = disk_datastores

    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []

    log.trace('datastore_names = %s', datastore_names)

    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (``vim.HostStorageSystem``).

    service_instance
        The Service Instance Object from which to obtain the storage system.

    host_ref
        The vim.HostSystem object whose storage system is retrieved.

    hostname
        Name of the host, used for logging/errors. Retrieved if not provided.

    Raises ``VMwareObjectRetrievalError`` when the storage system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo.

    storage_system
        The vim.HostStorageSystem used to query the disk.

    device_path
        The path of the device to inspect.

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    try:
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A single device path was passed, so only the first result is relevant
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec).

    storage_system
        The vim.HostStorageSystem used to compute the partition layout.

    device_path
        The path of the device on which the partition is added.

    partition_info
        The current vim.HostDiskPartitionInfo of the device.

    Raises ``VMwareObjectNotFoundError``/``VMwareNotFoundError`` when no free
    partition exists or the computed partition cannot be identified, and
    ``VMwareApiError``/``VMwareRuntimeError`` on API faults.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use a %s placeholder: the previous '{0}' brace placeholder is never
    # interpolated by the logging module's lazy %-style formatting
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk and returns the new ``vim.Datastore``
    reference.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved if not provided.

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a spec that turns the free space at the end of the disk
    # into a new vmfs partition
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (``vim.HostDatastoreSystem``).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises ``VMwareObjectRetrievalError`` when the datastore system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The removal is performed through the datastore
    system of the first host the datastore is attached to.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises ``VMwareApiError`` when no attached hosts are found or on
    permission/API faults, and ``VMwareRuntimeError`` on vmodl runtime
    faults.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host can perform the removal; use the first one
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.

    Raises ``ArgumentValueError`` when a cluster is given without a
    datacenter.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)

        if cluster_name:
            # Only hosts whose direct parent is the requested cluster match
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue

        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue

        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises ``VMwareObjectRetrievalError`` when storage device, multipath or
    lun information cannot be retrieved, and
    ``VMwareApiError``/``VMwareRuntimeError`` on API faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None; retrieved from the host
        when not provided.

    hostname
        Name of the host. This argument is optional.

    Returns an empty list when the host reports no scsi luns.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        # Need a service instance to look up the storage system
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    # Translate pyVmomi faults into salt exceptions
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Builds a map of all vim.ScsiLun objects on an ESXi host, keyed by their
    scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # First map each scsi address to a lun key, then resolve every key to the
    # corresponding lun object
    scsi_addr_to_lun_key = _get_scsi_address_to_lun_key_map(
        si, host_ref, storage_system, hostname)
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    scsi_addr_to_lun = {}
    for scsi_addr, lun_key in six.iteritems(scsi_addr_to_lun_key):
        scsi_addr_to_lun[scsi_addr] = lun_by_key[lun_key]
    return scsi_addr_to_lun
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # No filters and not retrieving everything: nothing can match
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    scsi_luns = get_all_luns(host_ref, storage_system)
    # Keep only scsi disks that match any of the enabled filters
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be returned

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError when the host reports no devices or the
    requested disk is not found.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Narrow down to the scsi disk with the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk of an ESXi host.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    # Retrieve the device list through the host's storage system object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # No cache disk ids and not retrieving everything: nothing can match
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (unique) cache disk (ssd)
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that a disk group has exactly the expected cache disk and
    capacity disks. Raises ArgumentValueError when the composition differs;
    returns True on success.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # Compare capacity disk sets independently of ordering
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # No manager supplied: traverse from the host to its cache
        # configuration manager and fetch the cache info in one query
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        # Manager supplied: read the cache info property directly
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host. Returns True on success.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the configuration task completes
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Lists the hosts known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    hosts = list_objects(service_instance, vim.HostSystem)
    return hosts
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean specifying whether to return all resource pools instead of
        filtering by name

    return
        List of vim.ResourcePool managed object references

    Raises VMwareObjectRetrievalError when no resource pool matches.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Scope the search to the datacenter when one is given; otherwise search
    # the whole inventory starting at the root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the requested names; the previous code interpolated the
        # (empty) result list here, which always rendered as 'names=[]'
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Lists the resource pools known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    pools = list_objects(service_instance, vim.ResourcePool)
    return pools
def list_networks(service_instance):
    '''
    Lists the networks known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    networks = list_objects(service_instance, vim.Network)
    return networks
def list_vms(service_instance):
    '''
    Lists the virtual machines known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vms = list_objects(service_instance, vim.VirtualMachine)
    return vms
def list_folders(service_instance):
    '''
    Lists the folders known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    folders = list_objects(service_instance, vim.Folder)
    return folders
def list_dvs(service_instance):
    '''
    Lists the distributed virtual switches known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    switches = list_objects(service_instance, vim.DistributedVirtualSwitch)
    return switches
def list_vapps(service_instance):
    '''
    Lists the vApps known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vapps = list_objects(service_instance, vim.VirtualApp)
    return vapps
def list_portgroups(service_instance):
    '''
    Lists the distributed virtual portgroups known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    portgroups = list_objects(service_instance,
                              vim.dvs.DistributedVirtualPortgroup)
    return portgroups
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.

    Returns the task result on success; on failure the task error is
    re-raised and translated into the corresponding salt exception.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll until the task leaves the running/queued states
    while task_info.state == 'running' or task_info.state == 'queued':
        # NOTE(review): this logs every sleep_seconds-th iteration; since the
        # loop sleeps ~1s per iteration, sleep_seconds only throttles logging,
        # not the polling interval — confirm against callers
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary since start_time
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault so it can be
        # translated into the appropriate salt exception below
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError when no VM matches, and
    VMwareMultipleObjectsError when more than one VM has the given name.
    '''
    if datacenter and not parent_ref:
        # Narrow the search to the datacenter when one was specified
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # The previous message read 'with thesame name' because two adjacent
        # string literals were concatenated without a separating space
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the same name, '
            'please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    NOTE(review): if base_vm_name is None, placement has no 'folder' key and
    datacenter is falsy, no branch assigns folder_object and the final return
    raises UnboundLocalError — confirm callers always satisfy one branch.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Cloning: reuse the parent folder of the base VM
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        # Explicit folder requested in the placement dict
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose resourcePool directly; hosts in a
            # cluster require traversing to the cluster's resource pool
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # The previous message referenced placement['host'], which does
            # not exist in this branch and raised KeyError instead of the
            # intended error
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(
                    placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts ``size`` expressed in ``unit`` to kibibytes.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    Returns a dict of the form ``{'size': <int>, 'unit': 'KB'}``; raises
    ArgumentValueError for an unrecognized unit.
    '''
    # Multipliers to kibibytes for each supported unit
    factors = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    normalized_unit = unit.lower()
    if normalized_unit not in factors:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    # vCenter needs an integer (long) value
    target_size = int(size * factors[normalized_unit])
    return {'size': target_size, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers a virtual machine on or off and waits for the operation to finish.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    # Validate the requested action up front
    if action not in ('on', 'off'):
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    task_name = 'power on' if action == 'on' else 'power off'
    try:
        if action == 'on':
            task = virtual_machine.PowerOn()
        else:
            task = virtual_machine.PowerOff()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the power task completes
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    try:
        # Only pass the host when a valid HostSystem was supplied
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object

    Raises VMwareVmRegisterError when the vmx file cannot be found.
    '''
    try:
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update

    Returns the result of the reconfiguration task.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfiguration completes
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine and waits for the destroy task to finish.

    vm_ref
        Managed object reference of a virtual machine object

    Raises VMwareApiError on permission/VIM faults and VMwareRuntimeError
    on vmodl runtime faults.
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory.

    Unlike ``delete_vm`` this does not destroy the virtual machine; it only
    removes it from the inventory (the original docstring said "Destroys",
    a copy-paste from ``delete_vm``).

    vm_ref
        Managed object reference of a virtual machine object

    Raises VMwareApiError on permission/VIM faults and VMwareRuntimeError
    on vmodl runtime faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed copy-pasted 'Destroying vm' message: this call only unregisters.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before re-raising, consistent with every other API wrapper in
        # this module (the original silently dropped the traceback here).
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_datacenter
|
python
|
def get_datacenter(service_instance, datacenter_name):
    '''
    Looks up the vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name

    Raises VMwareObjectRetrievalError when no datacenter matches.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if matches:
        return matches[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
|
Returns a vim.Datacenter managed object.
service_instance
The Service Instance Object from which to obtain datacenter.
datacenter_name
The datacenter name
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1765-L1780
|
[
"def get_datacenters(service_instance, datacenter_names=None,\n get_all_datacenters=False):\n '''\n Returns all datacenters in a vCenter.\n\n service_instance\n The Service Instance Object from which to obtain cluster.\n\n datacenter_names\n List of datacenter names to filter by. Default value is None.\n\n get_all_datacenters\n Flag specifying whether to retrieve all datacenters.\n Default value is None.\n '''\n items = [i['object'] for i in\n get_mors_with_properties(service_instance,\n vim.Datacenter,\n property_list=['name'])\n if get_all_datacenters or\n (datacenter_names and i['name'] in datacenter_names)]\n return items\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Gate loading of this utility module on the presence of pyVmomi.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param cmd: esxcli command and arguments
    :param protocol: Connection protocol; defaults to ``https``
    :param port: TCP port; defaults to ``443``
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: Dictionary as returned by ``cmd.run_all`` (``retcode``,
             ``stdout``, ``stderr``, ...), or ``False`` when the esxcli
             binary is not on PATH

    NOTE(review): the password is interpolated into a single-quoted shell
    string; a password containing a single quote would break the command
    line -- confirm callers pass trusted/escaped credentials.
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    # 'quiet' keeps the command line (which contains the password) out of logs.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        Location of the vCenter/ESXi endpoint.

    username / password
        Credentials; both are mandatory when ``mechanism`` is 'userpass'.

    protocol / port
        Passed straight through to pyVim's SmartConnect.

    mechanism
        'userpass' or 'sspi'. Any other value raises CommandExecutionError.

    principal / domain
        Kerberos service principal and user domain; both are mandatory when
        ``mechanism`` is 'sspi'.

    On SSL certificate-verification failures the connection is retried with
    verification disabled (twice, with progressively blunter contexts).
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Old pyVmomi versions don't accept b64token/mechanism kwargs.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # First retry: unverified SSL context (falls back to the
                # stdlib context factory when the private one is missing).
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Second retry: explicit TLSv1 context with verification off.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Disconnect at interpreter exit so sessions are not leaked.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Use a distinct local so the parameter is not shadowed by the result.
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Return the managed object of the given type whose name matches
    ``obj_name``, or None when nothing matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    root = get_inventory(si)
    view = root.viewManager.CreateContainerView(root.rootFolder,
                                                [obj_type], True)
    return next((mo for mo in view.view if mo.name == obj_name), None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    A process-wide cached service instance (pyVim's GetSi) is reused when it
    still points at the same host and we are not a proxy minion; otherwise a
    fresh connection is established via ``_get_service_instance``. The
    session is then probed and re-established if it went stale.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Session expired server-side: drop it and log in again once.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    The new stub reuses the authenticated session cookie of the existing
    connection, so no re-login is required.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    # stub.host is 'hostname:port'; keep only the hostname part.
    hostname = stub.host.split(':')[0]
    # The session cookie value is the first double-quoted token of the
    # existing cookie header.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only for log messages. This field is
        optional. NOTE(review): since the default '<unnamed>' is truthy, the
        fallback to ``mo_ref.name`` below only triggers when an empty value
        is passed explicitly -- confirm that is intended.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a ServiceInstance that shares the managed object's SOAP stub
    # (and therefore its authenticated session).
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic VIM faults.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Return True when the connection is made to a vCenter Server and False
    when it is made to an ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises a VMwareApiError for any other advertised api type.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    known_types = {'VirtualCenter': True, 'HostAgent': False}
    if api_type in known_types:
        return known_types[api_type]
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Returns the endpoint's AboutInfo (``content.about``).

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic VIM faults.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object, or None when
    no switch with that name exists.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object
    '''
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    view = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    return next((dvs for dvs in view.view if dvs.name == dvs_name), None)
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    Returns the base64-encoded token produced by the first GSSAPI context
    step, or raises CommandExecutionError when no token is produced.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            # NOTE(review): the function returns after the *first* step that
            # yields a token; in_token stays None so the loop never feeds a
            # server response back -- confirm the single-step flow is
            # intended.
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    Returns an empty dict when connected through vCenter (apiType other
    than 'HostAgent').

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Hardware details are harvested only on a direct ESXi connection.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # Only the first HostSystem in the view is consulted (on an ESXi
            # host the view contains that single host).
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # VMkernel NICs provide the IP/MAC interface grains.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NICs contribute only MAC addresses.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the container-view reference when done.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the full inventory (service content) of a Service Instance.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic VIM faults.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.

    When no traversal spec is supplied (and local_properties is False) a
    temporary ContainerView is created and destroyed again before returning.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    A candidate matches either when its property equals ``property_value``
    or when ``property_value`` equals the stringified object reference id.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    candidates = get_mors_with_properties(service_instance, object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        mor_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == mor_id:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list of dicts, one per retrieved managed object, mapping each
    requested property name to its value plus an ``object`` key holding the
    managed object reference itself.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # A stale HTTP connection can surface as BadStatusLine; retry once.
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        # Broken pipe: also worth one retry on a fresh connection.
        content = get_content(*content_args, **content_kwargs)
    object_list = []
    for obj in content:
        props = {prop.name: prop.val for prop in obj.propSet}
        props['object'] = obj.obj
        object_list.append(props)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises a VMwareApiError when no properties could be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # The object's name is fetched first only so log/error messages can
    # refer to it; a failure here just falls back to a placeholder.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the ``name`` property of a managed object, or None when the
    object has no name.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Returns a new virtual network adapter device object for the given
    adapter type name.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or 'e1000e'.

    Raises ``ValueError`` for any other value.
    '''
    # Dispatch table: adapter type name -> pyVmomi device class.
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name for a virtual network adapter
    device object.

    adapter_object
        The adapter device object from which to obtain the type name.

    Raises ``ValueError`` when the object matches none of the known types.
    '''
    # Order matters: the more specific classes must be tested before their
    # base classes (e.g. VirtualVmxnet2 before VirtualVmxnet), matching the
    # original isinstance chain.
    ordered_types = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for adapter_cls, type_name in ordered_types:
        if isinstance(adapter_object, adapter_cls):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.
    dvs_names
        The names of the DVSs to return. Default is None.
    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    properties = ['name']
    # DVSs live under the datacenter's networkFolder; traverse into the
    # folder (skipped itself) and collect its child entities.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Filter client-side: keep everything when get_all_dvss, otherwise only
    # the requested names. If neither is given, the result is empty.
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualSwitch,
                                      container_ref=dc_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_dvss or (dvs_names and i['name'] in dvs_names)]
    return items
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference whose network folder is returned.

    Raises ``salt.exceptions.VMwareObjectRetrievalError`` if the folder
    couldn't be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    # Traverse directly to the datacenter's 'networkFolder' property.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits
    for the creation task to complete.

    dc_ref
        The parent datacenter reference.
    dvs_name
        The name of the DVS to create.
    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec is built here.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Fill in a default spec if the caller didn't provide one; the name is
    # only set when the config spec is also created here.
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    # A DVS is created under the datacenter's network folder.
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Creation is asynchronous; block until the task finishes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and waits for
    the reconfiguration task to complete.

    dvs_ref
        The DVS reference.
    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Reconfiguration is asynchronous; block until the task finishes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether network I/O control (NIOC) is enabled on a DVS.

    dvs_ref
        The DVS reference.
    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    # This call is synchronous; there is no task to wait for.
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.
    portgroup_names
        The names of the portgroups to return. Default is None.
    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises ``salt.exceptions.ArgumentValueError`` if the parent is of an
    unsupported type.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    # For a datacenter, portgroups live under its networkFolder's children;
    # for a DVS they are reachable directly via its 'portgroup' property.
    if isinstance(parent_ref, vim.Datacenter):
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is a distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference

    Raises ``salt.exceptions.VMwareObjectRetrievalError`` if no uplink
    portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by the system tag
    # 'SYSTEM/DVS.UPLINKPG' attached by vCenter.
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference
    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Creation is asynchronous; block until the task finishes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference
    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message ('portgrouo' -> 'portgroup')
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Reconfiguration is asynchronous; block until the task finishes.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Removal is asynchronous; block until the task finishes.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.
    network_names
        The name of the standard switch networks. Default is None.
    get_all_networks
        Boolean indicating whether to return all networks in the parent.
        Default is False.

    Raises ``salt.exceptions.ArgumentValueError`` if the parent is not a
    datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks live under the datacenter's networkFolder; traverse into the
    # folder (skipped itself) and collect its child entities.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.
    vim_object
        The type of content for which to obtain information.
    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    entries = get_mors_with_properties(service_instance, vim_object, properties)
    return [entry['name'] for entry in entries]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license
        manager.
    '''
    log.debug('Retrieving license manager')
    # Accessing the property can trigger a server round-trip; translate the
    # pyVmomi faults into salt exceptions like the rest of this module.
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises ``salt.exceptions.VMwareObjectRetrievalError`` if the manager
    wasn't retrieved.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.
    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license and returns the resulting license info object.

    service_instance
        The Service Instance Object.
    key
        The key of the license to add.
    description
        The description of the license to add.
    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    label = vim.KeyValue()
    # 'VpxClientLicenseLabel' is the label key the vSphere client uses to
    # display a license description.
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not
    provided, then entity_name is assumed to be the vcenter. This is later
    checked if the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.
    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.
    entity_name
        Entity name used in logging.
        Default is None.
    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ``salt.exceptions.ArgumentValueError`` if entity_name is not
    passed; ``salt.exceptions.VMwareObjectRetrievalError`` if the returned
    assignments are inconsistent.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # Query by the vCenter's instance UUID instead of a moid.
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter uuid query is expected to return exactly one assignment.
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        # NOTE(review): assignments[0] raises IndexError if the server
        # returned no assignments — confirm whether that can happen here.
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the resulting license info
    object.

    service_instance
        The Service Instance Object from which to obtain the licenses.
    license_key
        The key of the license to add.
    license_name
        The description of the license to add.
    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.
    entity_name
        Entity name used in logging.
        Default is None.
    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Added log.exception for consistency with the other fault
            # handlers in this module.
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns datacenter objects in a vCenter.

    service_instance
        The Service Instance Object from which to obtain the datacenters.
    datacenter_names
        List of datacenter names to filter by. Default value is None.
    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder and returns its reference.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object
    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    # CreateDatacenter is synchronous and returns the new datacenter ref.
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference
    cluster
        The cluster to be retrieved

    Raises ``salt.exceptions.VMwareObjectRetrievalError`` if the cluster is
    not found in the datacenter.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's hostFolder; traverse into the
    # folder (skipped itself) and collect its child entities.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.
    cluster_name
        The cluster name.
    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    # CreateClusterEx is synchronous; no task to wait for.
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter and waits for the reconfiguration
    task to complete.

    cluster_ref
        The cluster reference.
    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    # modify=True merges the spec into the existing configuration instead
    # of replacing it wholesale.
    try:
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster (storage pod) names associated with
    a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastore names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dictionary mapping each datastore name to its basic
    information: name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {name: list_datastore_full(service_instance, name)
            for name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    datastore
        Name of the datastore.

    Raises ``salt.exceptions.VMwareObjectRetrievalError`` if no datastore
    with that name exists.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    # str() may surround some pyVmomi string values with quotes; strip
    # them so callers get clean values.
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # capacity/freeSpace are reported in bytes; convert to MiB.
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against ZeroDivisionError: inaccessible datastores can report
    # a capacity of 0.
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key is of the form 'vim.HostSystem:host-123'; keep only the
        # moid part after the first colon.
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get a reference to the first object of the specified type whose name
    matches, or None when nothing matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)
    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)
    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    matches = (obj for obj in container.view if obj.name == obj_name)
    return next(matches, None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get a reference to the first object of the specified type whose moid
    matches, or None when nothing matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)
    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)
    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    matches = (obj for obj in container.view if obj._moId == obj_moid)
    return next(matches, None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.
    directory
        The name of the directory where we would like to search
    datastores
        Name of the datastores
    container_object
        The base object for searches
    browser_spec
        BrowserSpec object which defines the search criteria
    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # A datastore with no matching files raises FileNotFound; treat it
        # as an empty result rather than an error.
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all datastores visible from the
        reference. Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
    # Backing-disk filtering requires access to the host's storage system,
    # so it is only supported for host references
    if backing_disk_ids and not isinstance(reference, vim.HostSystem):
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\' when backing disk filter '
            'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Merge the disk-derived names into the explicit name filter
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Build a traversal spec appropriate for the reference type; each type
    # reaches its datastores through a different property path
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    # NoPermission is handled before VimFault since it is the more specific
    # fault and carries the missing privilege id
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used to retrieve the storage system.

    host_ref
        The vim.HostSystem whose storage system is retrieved.

    hostname
        Name of the host; looked up from host_ref if not provided.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The vim.HostStorageSystem used to query the disk.

    device_path
        Path of the disk device whose partition layout is retrieved.
    '''
    try:
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A single device path was passed in, so only the first result matters
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem of the host owning the disk.

    device_path
        Path of the disk device to be partitioned.

    partition_info
        vim.HostDiskPartitionInfo describing the current disk layout.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition at the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # BUGFIX: logging interpolates lazily with %-style placeholders; the
    # previous '{0}' placeholder was never substituted into the message.
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        # NOTE(review): sibling checks raise VMwareObjectNotFoundError;
        # confirm VMwareNotFoundError actually exists in salt.exceptions
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDisk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem; retrieved if not provided.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a new partition that consumes the free space at the end of
    # the disk
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The removal is performed through the datastore
    system of the first host the datastore is attached to.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Use the first attached host's datastore system for the removal
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    # Guard clause: a cluster filter is meaningless without a datacenter
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    properties = ['name']
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # The 'parent' property is needed to test cluster membership;
            # cluster existence only makes sense when a datacenter is given
            properties.append('parent')
    else:
        # No datacenter given: search from the inventory root folder
        start_point = get_root_folder(service_instance)
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])

    selected = []
    for host in hosts:
        if cluster_name:
            # Keep only hosts whose parent is the requested cluster
            parent = host['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host['name'] in host_names:
            selected.append(host['object'])
    return selected
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.

        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    # No luns found is not an error here; return an empty list
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = storage_system or get_storage_system(si, host_ref,
                                                          hostname)
    # scsi address -> lun key
    key_by_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                   storage_system, hostname)
    # lun key -> vim.ScsiLun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Join the two maps: scsi address -> vim.ScsiLun object
    addr_to_lun = {}
    for scsi_addr, lun_key in six.iteritems(key_by_addr):
        addr_to_lun[scsi_addr] = lun_by_key[lun_key]
    return addr_to_lun
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # Nothing to filter by: no disks can match
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Find the scsi disk matching the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # Retrieve the host's scsi luns to locate the target disk's device path
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their canonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # No filter given: nothing can match
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (single) cache disk's canonical name
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # Compare capacity disk ids order-independently
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, otherwise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # No manager provided: traverse from the host to its cache
        # configuration manager and read its configuration info
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache
        will be configured on.

    swap_size_MiB
        The size in mebibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # The configuration is applied asynchronously; block until it completes
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    # Thin wrapper over the generic object lister for vim.HostSystem
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; when True all resource pools in the container are returned

    return
        List of resource pool managed object references

    raises
        salt.exceptions.VMwareObjectRetrievalError if no pool matched
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Scope the search to the datacenter when given, else the whole inventory
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = [pool['object'] for pool in resource_pools
                      if get_all_resource_pools or
                      pool['name'] in resource_pool_names]
    if not selected_pools:
        # BUGFIX: report the requested names rather than the (always empty at
        # this point) result list, so the error message is actionable
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    # Delegate to the generic lister with the ResourcePool managed object type.
    pools = list_objects(service_instance, vim.ResourcePool)
    return pools
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    # Delegate to the generic lister with the Network managed object type.
    networks = list_objects(service_instance, vim.Network)
    return networks
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    # Delegate to the generic lister with the VirtualMachine managed object type.
    vms = list_objects(service_instance, vim.VirtualMachine)
    return vms
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    # Delegate to the generic lister with the Folder managed object type.
    folders = list_objects(service_instance, vim.Folder)
    return folders
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    # Delegate to the generic lister with the DistributedVirtualSwitch type.
    switches = list_objects(service_instance, vim.DistributedVirtualSwitch)
    return switches
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    # Delegate to the generic lister with the VirtualApp managed object type.
    vapps = list_objects(service_instance, vim.VirtualApp)
    return vapps
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    # Delegate to the generic lister with the DistributedVirtualPortgroup type.
    portgroups = list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
    return portgroups
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    Polls ``task.info`` roughly once per second until the task leaves the
    ``running``/``queued`` states, then either returns the task result or
    re-raises the task error translated into a salt exception.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Initial read of the task state; pyVmomi faults are translated into
    # the corresponding salt exceptions.
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll while the task is still queued or running; a progress message is
    # logged only every 'sleep_seconds' iterations.
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep up to the next whole-second boundary relative to start_time
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; raise the stored fault so it can be
        # translated into the matching salt exception below
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first fault message detail, if present
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError when no VM matches and
    VMwareMultipleObjectsError when several VMs share the name.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set covering identity, hardware, storage,
        # guest and runtime information
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # The two adjacent string literals used to concatenate to
        # 'with thesame name'; the missing separator comma is restored so
        # ' '.join inserts a space.
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises VMwareObjectRetrievalError when no folder can be determined.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Cloning: use the parent folder of the base virtual machine
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif placement and 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this path fell through and raised UnboundLocalError on
        # the return statement; raise an explicit, descriptive error instead.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The placement folder object could not be determined')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if placement is None:
        # Treat a missing placement like an empty one so the schema error
        # below is raised instead of a TypeError on the 'in' checks
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                    get_properties_of_managed_object(host_objects[0],
                                                     properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts don't expose resourcePool directly; traverse
            # through the parent cluster instead
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # The original message read placement['host'] here, which raised
            # KeyError in this branch; report the resource pool instead.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a dict with the
    size as an integer and the unit 'KB'.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    Raises ArgumentValueError when the unit is not GB, MB or KB
    (case insensitive).
    '''
    normalized_unit = unit.lower()
    if normalized_unit == 'gb':
        # vCenter needs long value
        target_size = int(size * 1024 * 1024)
    elif normalized_unit == 'mb':
        target_size = int(size * 1024)
    elif normalized_unit == 'kb':
        target_size = int(size)
    else:
        # The original message claimed the unit was 'not specified' even
        # though an (unsupported) unit was passed in.
        raise salt.exceptions.ArgumentValueError(
            'The unit \'{0}\' is not supported'.format(unit))
    return {'size': target_size, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    # Resolve the requested operation first so the fault handling below is
    # shared between both power actions.
    if action == 'on':
        power_call = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_call = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_call()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Build the task keyword arguments once; the host is only supplied when
    # a valid vim.HostSystem was given.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object
    '''
    # Build the task keyword arguments once; the host is optional.
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    machine_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', machine_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Return the reconfigured VM reference produced by the task
    return wait_for_task(task, machine_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine (removes the VM and deletes its files from
    the datastore; contrast with unregister_vm which only removes it from
    the inventory).

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    # pyVmomi faults are translated into the corresponding salt exceptions
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the destroy task finishes
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory. The VM's files are
    left on disk; use delete_vm to destroy them.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # The original message said 'Destroying', which was misleading for an
    # unregister operation.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # log.exception added for consistency with every sibling handler
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
create_datacenter
|
python
|
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    # Datacenters are always created under the root folder
    dc_parent = get_root_folder(service_instance)
    try:
        dc_obj = dc_parent.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
|
Creates a datacenter.
.. versionadded:: 2017.7.0
service_instance
The Service Instance Object
datacenter_name
The datacenter name
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1783-L1810
|
[
"def get_root_folder(service_instance):\n '''\n Returns the root folder of a vCenter.\n\n service_instance\n The Service Instance Object for which to obtain the root folder.\n '''\n try:\n log.trace('Retrieving root folder')\n return service_instance.RetrieveContent().rootFolder\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    # NOTE(review): the password is interpolated into a shell command line;
    # a password containing a single quote would break the quoting. Output is
    # run with output_loglevel='quiet' below to avoid leaking credentials.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Raises CommandExecutionError for invalid or incomplete login mechanisms
    and VMwareConnectionError when the connection itself fails.
    '''
    log.trace('Retrieving new service instance')
    token = None
    # Validate that the chosen mechanism has all of its mandatory parameters
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                # Kerberos/GSSAPI token is used instead of username/password
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    # First attempt: connect with default SSL verification
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Older pyVmomi releases don't accept the b64token/mechanism kwargs
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # Second attempt: retry without certificate verification when the
            # failure was an SSL certificate verification error
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: explicit TLSv1 context with verification off
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is closed when the interpreter exits
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of customizing a clone

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Look the spec up by name via the customization spec manager
    spec_ref = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name)
    return spec_ref
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    # Return the first object whose name matches, or None when absent
    return next((item for item in container.view if item.name == obj_name), None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # Reuse the cached service instance pyVmomi may already hold
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: reconnect from scratch
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    # Reuse the authenticated session of the existing connection: the session
    # cookie is extracted from the current stub and propagated to the new one
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Build a service instance object from a managed object's SOAP stub.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only for logging. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    service_instance = vim.ServiceInstance('ServiceInstance')
    # Reuse the managed object's stub so the instance shares its session
    service_instance._stub = mo_ref._stub
    return service_instance
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises:
        salt.exceptions.VMwareApiError: if the user lacks the required
            privilege or a generic vim fault occurs.
        salt.exceptions.VMwareRuntimeError: on a vmodl runtime fault.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises:
        salt.exceptions.VMwareApiError: on permission/vim faults, or if the
            reported apiType is neither 'VirtualCenter' nor 'HostAgent'.
        salt.exceptions.VMwareRuntimeError: on a vmodl runtime fault.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host
    (the ``AboutInfo`` object of the service instance).

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic vim faults.
        salt.exceptions.VMwareRuntimeError: on a vmodl runtime fault.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None if no DVS with that name exists
    '''
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(
            inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        try:
            for item in container.view:
                if item.name == dvs_name:
                    return item
        finally:
            # Destroy the view so it is not leaked on the server side
            # (mirrors the cleanup done in get_content)
            container.Destroy()
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    Returns the base64-encoded token, or raises
    salt.exceptions.CommandExecutionError if no token could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # First (and effectively only) step of the GSSAPI handshake
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): in_token is never reassigned in this loop, so this
        # branch always raises when no out_token was produced — the loop
        # never performs a second handshake step. Verify against gssapi docs.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only ESXi hosts (HostAgent) expose the hardware info used below
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the (single) host this HostAgent manages
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grain is reported in MiB
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # VMkernel NICs: collect per-device IPv4/IPv6 addresses and MACs
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NICs only contribute MAC addresses
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # NOTE(review): the container view is only dereferenced here, not
        # Destroy()ed — the server-side view object may be leaked
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (retrieved service content) of a Service Instance
    Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic vim faults.
        salt.exceptions.VMwareRuntimeError: on a vmodl runtime fault.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting point for the
    # filter is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view (only if we created it above)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Fetch all managed objects carrying the requested property
    entries = get_mors_with_properties(service_instance, object_type,
                                       property_list=[property_name],
                                       container_ref=container_ref)
    for entry in entries:
        # Match either on the property value or on the stringified moid
        moid = six.text_type(entry.get('object', '')).strip('\'"')
        if property_value in (entry[property_name], moid):
            return entry['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # Retry once on a stale HTTP connection
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        # Broken pipe: retry once as well
        content = get_content(*content_args, **content_kwargs)

    # Flatten each result's propSet into a dict; the mo-ref itself is
    # stored under the 'object' key
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises:
        salt.exceptions.VMwareApiError: if no properties could be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First fetch just the name for better log/error messages
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Return the ``name`` property of a managed object, or None when the
    name could not be found.

    mo_ref
        The managed object reference.
    '''
    retrieved = get_properties_of_managed_object(mo_ref, ['name'])
    return retrieved.get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new network adapter device object for the given type name.

    adapter_type
        The adapter type name ('vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or
        'e1000e') from which to obtain the network adapter type.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Return the network adapter type name for a given adapter device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.
    '''
    # Order matters: subclasses (vmxnet2/3, e1000e) must be checked before
    # their base classes (vmxnet, e1000)
    type_checks = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for adapter_cls, type_name in type_checks:
        if isinstance(adapter_object, adapter_cls):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # DVSs live under the datacenter's network folder, so traverse
    # networkFolder -> childEntity instead of the default spec
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvs_refs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvs_refs.append(entry['object'])
    return dvs_refs
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises salt.exceptions.VMwareObjectRetrievalError when the folder
    could not be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if folders:
        return folders[0]['object']
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Network folder in datacenter \'{0}\' wasn\'t retrieved'
        ''.format(dc_name))
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switches (DVS) in a datacenter.
    Returns the reference to the newly created distributed virtual switch.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None (a minimal spec carrying only the name is built).

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic vim faults.
        salt.exceptions.VMwareRuntimeError: on a vmodl runtime fault.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        # NOTE: dvs_name is only applied when the caller did not supply a
        # configSpec of their own
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic vim faults.
        salt.exceptions.VMwareRuntimeError: on a vmodl runtime fault.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (network I/O control) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic vim faults.
        salt.exceptions.VMwareRuntimeError: on a vmodl runtime fault.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the dvportgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Portgroups live under the datacenter's network folder
        child_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[child_spec])
    else:
        # Parent is a distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs)

    dvs_ref
        The dvs reference

    Raises salt.exceptions.VMwareObjectRetrievalError when no uplink
    portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        tags = entry['tag']
        # The uplink portgroup carries the SYSTEM/DVS.UPLINKPG tag
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            return entry['object']
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs)

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic vim faults.
        salt.exceptions.VMwareRuntimeError: on a vmodl runtime fault.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic vim faults.
        salt.exceptions.VMwareRuntimeError: on a vmodl runtime fault.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log message typo ('portgrouo' -> 'portgroup')
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup

    portgroup_ref
        The portgroup reference

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic vim faults.
        salt.exceptions.VMwareRuntimeError: on a vmodl runtime fault.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks live under the datacenter's network folder
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_spec])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    return [entry['name'] for entry in
            get_mors_with_properties(service_instance, vim_object, properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic vim faults.
        salt.exceptions.VMwareRuntimeError: on a vmodl runtime fault.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic vim faults.
        salt.exceptions.VMwareRuntimeError: on a vmodl runtime fault.
        salt.exceptions.VMwareObjectRetrievalError: if the manager is unset.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
                service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Raises:
        salt.exceptions.VMwareApiError: on permission or generic vim faults.
        salt.exceptions.VMwareRuntimeError: on a vmodl runtime fault.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license and returns the created license object.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # NOTE(review): 'VpxClientLicenseLabel' is presumably the label key shown
    # by the vSphere client UI -- confirm against the vSphere API docs
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        # The vCenter is identified by its instance UUID instead of a moid
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid lookup) is expected to have exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        # NOTE(review): assumes at least one assignment was returned;
        # an empty result would raise IndexError here -- TODO confirm
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license object.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter is identified by its instance UUID
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log the traceback before mapping to a salt exception, for
            # consistency with the rest of the module's fault handlers
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host; identified by its moid
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns the list of datacenters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    datacenters = list_objects(service_instance, vim.Datacenter)
    return datacenters
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns a list of vim.Datacenter objects in a vCenter.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        # Keep the object if all datacenters were requested or if its name
        # matches the requested filter
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns the vim.Datacenter managed object with the given name.

    Raises ``VMwareObjectRetrievalError`` if no datacenter with that name
    exists.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name.
    '''
    found = get_datacenters(service_instance,
                            datacenter_names=[datacenter_name])
    if found:
        return found[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference.

    cluster
        The name of the cluster to be retrieved.

    Raises ``VMwareObjectRetrievalError`` if the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's hostFolder; traverse
    # Datacenter -> hostFolder -> childEntity to reach them
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    # Clusters are created under the datacenter's hostFolder
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter and blocks until the reconfiguration
    task completes.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing config instead of
        # replacing it
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns the list of clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    clusters = list_objects(service_instance, vim.ClusterComputeResource)
    return clusters
def list_datastore_clusters(service_instance):
    '''
    Returns the list of datastore clusters associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    datastore_clusters = list_objects(service_instance, vim.StoragePod)
    return datastore_clusters
def list_datastores(service_instance):
    '''
    Returns the list of datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastores = list_objects(service_instance, vim.Datastore)
    return datastores
def list_datastores_full(service_instance):
    '''
    Returns a dictionary mapping each datastore associated with a given
    service instance to its basic information:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {datastore: list_datastore_full(service_instance, datastore)
            for datastore in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    Capacity/free/used are reported in MiB; usage is a percentage.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises ``VMwareObjectRetrievalError`` if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against a zero capacity (e.g. inaccessible datastore) to avoid
    # a ZeroDivisionError
    if items['capacity']:
        items['usage'] = \
            (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []
    # 'host' may be unset when the datastore is not mounted on any host
    for host in datastore_object.host or []:
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name, or None if
    no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # isn't leaked on the vCenter/ESXi side
        container.Destroy()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id, or None if
    no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # isn't leaked on the vCenter/ESXi side
        container.Destroy()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Search path uses the '[datastore] path' datastore-path syntax
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore without the directory is simply skipped
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Merge the disk-derived datastore names into the name filter
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Pick a traversal spec that reaches the 'datastore' property from the
    # concrete reference type; the default spec is not sufficient here
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object from which to obtain the storage system.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host, used in error messages. This argument is optional;
        if not given it is looked up from ``host_ref``.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # The storage system hangs off the host's configManager
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    # NOTE(review): only 'systemFile' is retrieved here, presumably as a
    # cheap placeholder property since just the object ref is used -- confirm
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition informations for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The vim.HostStorageSystem to query.

    device_path
        Path of the device whose partition info is retrieved.
    '''
    try:
        # The API takes a list of paths; we only query a single device
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem used to compute the partition info.

    device_path
        Path of the device on which the partition is added.

    partition_info
        The current vim.HostDiskPartitionInfo of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use %-style lazy formatting; '{0}' would be logged literally and the
    # argument dropped
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id and returns the created
    vim.Datastore reference.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Optional; retrieved if not given.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    # Compute a partition spec that fills the remaining free space of the disk
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # The datastore system hangs off the host's configManager
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The removal is performed through the datastore
    system of the first host the datastore is attached to.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    # Removal goes through a host's datastore system, so at least one
    # attached host is required
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.

        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns the list of all vim.HostScsiDisk objects on a host (empty list
    if none are found).

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Builds a map of all vim.ScsiLun objects on an ESXi host, keyed by their
    SCSI address.
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    storage_system
        The host's storage system. Default is None.
    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref,
                                                                name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # First map: scsi address -> lun key
    addr_to_key = _get_scsi_address_to_lun_key_map(service_instance, host_ref,
                                                   storage_system, hostname)
    # Second map: lun key -> vim.ScsiLun object
    key_to_lun = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        key_to_lun[lun.key] = lun
    # Compose the two maps: scsi address -> vim.ScsiLun object
    scsi_addr_map = {}
    for scsi_addr, lun_key in six.iteritems(addr_to_key):
        scsi_addr_map[scsi_addr] = key_to_lun[lun_key]
    return scsi_addr_map
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None
    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None
    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False. When True, ``disk_ids`` and
        ``scsi_addresses`` are ignored.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Nothing requested and not retrieving everything -> nothing to do
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    # A disk matches if we're retrieving everything, or its canonical name
    # was requested, or its key maps back to a requested scsi address
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns the partition information (vim.HostDiskPartitionInfo) for a disk
    host_ref
        The reference of the ESXi host containing the disk
    disk_id
        The canonical name of the disk whose partitions are to be retrieved
    storage_system
        The ESXi host's storage system. Default is None; if None it is
        retrieved from the host.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the requested disk among the host's SCSI LUNs by canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec
    service_instance
        The Service Instance Object from which to obtain all information
    host_ref
        The reference of the ESXi host containing the disk
    disk_id
        The canonical name of the disk whose partitions are to be removed
    hostname
        The ESXi hostname. Default is None.
    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # Collect the host's SCSI LUNs via its storage system
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the requested disk by canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disk groups
    in a ESXi host, filtered by the canonical names of their cache disks.
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.
    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # Nothing requested and not retrieving everything -> nothing to do
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # Each mapping has exactly one SSD (cache disk); match it against the
    # requested cache disk ids
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Validates that a disk group's cache disk and capacity disks match the
    expected canonical names; raises ArgumentValueError on mismatch,
    returns True on success.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    actual_capacity_ids = sorted(
        [d.canonicalName for d in disk_group.nonSsd])
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids,
                      expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the host cache configuration (the first ``cacheConfigurationInfo``
    entry) if the host cache is configured on the specified host, otherwise
    returns None
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # No manager provided; traverse from the host to its cache
        # configuration manager and read the config from there
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host; returns True on success.
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.
    swap_size_MiB
        The size in Mibibytes of the swap.
    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the configuration task finished
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Lists every host (vim.HostSystem) known to the given service instance.
    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    mor_type = vim.HostSystem
    return list_objects(service_instance, mor_type)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects
    service_instance
        The service instance object to query the vCenter
    resource_pool_names
        List of resource pool names to retrieve
    datacenter_name
        Name of the datacenter where the resource pool is available.
        Default is None (search from the root folder).
    get_all_resource_pools
        Boolean; when True return every resource pool found, ignoring
        ``resource_pool_names``
    return
        List of resource pool managed object references
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Scope the search to the datacenter when given, else to the whole
    # inventory
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the requested names (previously this interpolated the
        # empty result list, producing a useless 'names=[]' message)
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Lists every resource pool (vim.ResourcePool) known to the given service
    instance.
    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    mor_type = vim.ResourcePool
    return list_objects(service_instance, mor_type)
def list_networks(service_instance):
    '''
    Lists every network (vim.Network) known to the given service instance.
    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    mor_type = vim.Network
    return list_objects(service_instance, mor_type)
def list_vms(service_instance):
    '''
    Lists every virtual machine (vim.VirtualMachine) known to the given
    service instance.
    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    mor_type = vim.VirtualMachine
    return list_objects(service_instance, mor_type)
def list_folders(service_instance):
    '''
    Lists every folder (vim.Folder) known to the given service instance.
    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    mor_type = vim.Folder
    return list_objects(service_instance, mor_type)
def list_dvs(service_instance):
    '''
    Lists every distributed virtual switch (vim.DistributedVirtualSwitch)
    known to the given service instance.
    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    mor_type = vim.DistributedVirtualSwitch
    return list_objects(service_instance, mor_type)
def list_vapps(service_instance):
    '''
    Lists every vApp (vim.VirtualApp) known to the given service instance.
    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    mor_type = vim.VirtualApp
    return list_objects(service_instance, mor_type)
def list_portgroups(service_instance):
    '''
    Lists every distributed virtual portgroup
    (vim.dvs.DistributedVirtualPortgroup) known to the given service instance.
    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    mor_type = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, mor_type)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a vCenter task to complete; returns the task result on success
    and re-raises the task's error (mapped to a salt exception) on failure.
    task
        The task to wait for.
    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.
    task_type
        The type of task being performed. Useful information for debugging purposes.
    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.
    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll once per second until the task leaves the running/queued states;
    # only log every `sleep_seconds` polls to avoid flooding the log
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep the remainder of the current 1-second interval so polls stay
        # aligned to 1-second boundaries regardless of how long task.info took
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault so the
        # handlers below can map it to the appropriate salt exception
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.
    service_instance
        Service instance object to access vCenter
    name
        Name of the virtual machine.
    datacenter
        Datacenter name
    vm_properties
        List of vm properties.
    traversal_spec
        Traversal Spec object(s) for searching.
    parent_ref
        Container Reference object for searching under a given object.
    Raises VMwareObjectRetrievalError if no VM matches, and
    VMwareMultipleObjectsError if more than one matches.
    '''
    if datacenter and not parent_ref:
        # Scope the search to the named datacenter
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # NOTE: the two fragments below were previously implicitly
        # concatenated without a separator, producing 'with thesame name'
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object
    service_instance
        Service instance object
    datacenter
        Name of the datacenter
    placement
        Placement dictionary
    base_vm_name
        Existing virtual machine name (for cloning)
    Raises VMwareObjectRetrievalError when no folder can be determined from
    the given arguments.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Clone case: use the parent folder of the base virtual machine
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vmFolder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this path fell through to an UnboundLocalError on
        # folder_object; raise a meaningful error instead
        raise salt.exceptions.VMwareObjectRetrievalError(
            'A folder could not be determined: no base virtual machine, '
            'folder placement or datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.
    datacenter
        Name of the datacenter
    placement
        Dictionary with the placement info, cluster, host resource pool name
    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        # Strictest placement: a specific host; use its resource pool
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts don't expose resourcePool directly; traverse
            # from the host's parent compute resource instead
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # NOTE: previously this message referenced placement['host'],
            # which raised a KeyError in this branch
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit; returns a dict with the
    converted integer size and the unit, e.g. ``{'size': 1048576, 'unit': 'KB'}``.
    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB.
        Accepted (case-insensitive): GB, MB, KB.
    size
        Number which represents the size
    Raises ``ArgumentValueError`` if the unit is not recognized.
    '''
    # Multipliers from the given unit to KiB; vCenter needs an integer value
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    try:
        multiplier = multipliers[unit.lower()]
    except KeyError:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': int(size * multiplier), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine and waits for the operation to complete;
    returns the virtual machine object.
    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine
    action
        Operation option to power on/off the machine; either ``on`` or ``off``
    '''
    # Resolve the power method first so both actions share one set of
    # fault handlers (previously the try/except block was duplicated)
    if action == 'on':
        power_method = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_method = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_method()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec
    vm_name
        Virtual machine name to be created
    vm_config_spec
        Virtual Machine Config Spec object
    folder_object
        vm Folder managed object reference
    resourcepool_object
        Resource pool object where the machine will be created
    host_object
        Host object where the machine will be placed (optional)
    return
        Virtual Machine managed object reference
    '''
    try:
        # Only pass the host when a valid vim.HostSystem was supplied;
        # otherwise let vCenter pick a host from the resource pool
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference
    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object
    name
        Name of the virtual machine
    vmx_path:
        Full path to the vmx file, datastore name should be included
    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object
    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)
    '''
    try:
        # Register under the datacenter's vmFolder; pass the host only when
        # one was supplied
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Reconfigures a virtual machine with the given config spec and returns
    the task result.
    vm_ref
        Virtual machine managed object reference
    vm_config_spec
        Virtual machine config spec object to update
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(reconfig_task, name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine, deleting it from the inventory and
    removing its files from the datastore.
    vm_ref
        Managed object reference of a virtual machine object
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory; the VM's files are
    left on the datastore (unlike ``delete_vm``, which destroys them).
    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed copy/paste from delete_vm: this unregisters, it does not destroy
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before re-raising, for consistency with the sibling handlers
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_cluster
|
python
|
def get_cluster(dc_ref, cluster):
'''
Returns a cluster in a datacenter.
dc_ref
The datacenter reference
cluster
The cluster to be retrieved
'''
dc_name = get_managed_object_name(dc_ref)
log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
cluster, dc_name)
si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
path='hostFolder',
skip=True,
type=vim.Datacenter,
selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
path='childEntity',
skip=False,
type=vim.Folder)])
items = [i['object'] for i in
get_mors_with_properties(si,
vim.ClusterComputeResource,
container_ref=dc_ref,
property_list=['name'],
traversal_spec=traversal_spec)
if i['name'] == cluster]
if not items:
raise salt.exceptions.VMwareObjectRetrievalError(
'Cluster \'{0}\' was not found in datacenter '
'\'{1}\''. format(cluster, dc_name))
return items[0]
|
Returns a cluster in a datacenter.
dc_ref
The datacenter reference
cluster
The cluster to be retrieved
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1813-L1846
|
[
"def get_mors_with_properties(service_instance, object_type, property_list=None,\n container_ref=None, traversal_spec=None,\n local_properties=False):\n '''\n Returns a list containing properties and managed object references for the managed object.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n\n object_type\n The type of content for which to obtain managed object references.\n\n property_list\n An optional list of object properties used to return even more filtered managed object reference results.\n\n container_ref\n An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,\n ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory\n rootFolder.\n\n traversal_spec\n An optional TraversalSpec to be used instead of the standard\n ``Traverse All`` spec\n\n local_properties\n Flag specigying whether the properties to be retrieved are local to the\n container. If that is the case, the traversal spec needs to be None.\n '''\n # Get all the content\n content_args = [service_instance, object_type]\n content_kwargs = {'property_list': property_list,\n 'container_ref': container_ref,\n 'traversal_spec': traversal_spec,\n 'local_properties': local_properties}\n try:\n content = get_content(*content_args, **content_kwargs)\n except BadStatusLine:\n content = get_content(*content_args, **content_kwargs)\n except IOError as exc:\n if exc.errno != errno.EPIPE:\n raise exc\n content = get_content(*content_args, **content_kwargs)\n\n object_list = []\n for obj in content:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n properties['object'] = obj.obj\n object_list.append(properties)\n log.trace('Retrieved %s objects', len(object_list))\n return object_list\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n",
"def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):\n '''\n Retrieves the service instance from a managed object.\n\n me_ref\n Reference to a managed object (of type vim.ManagedEntity).\n\n name\n Name of managed object. This field is optional.\n '''\n if not name:\n name = mo_ref.name\n log.trace('[%s] Retrieving service instance from managed object', name)\n si = vim.ServiceInstance('ServiceInstance')\n si._stub = mo_ref._stub\n return si\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param cmd: esxcli command and arguments
    :param protocol: Connection protocol; defaults to ``https``
    :param port: TCP port; defaults to 443
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: Dictionary (the cmdmod.run_all result dict), or False if the
             ``esxcli`` binary is not on the PATH
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting through a vCenter: target the named ESXi host with -h.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    # output_loglevel='quiet' keeps the command line (which embeds the
    # password) out of the logs.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        Location of the server to connect to.
    username / password
        Credentials; mandatory for the ``userpass`` mechanism.
    protocol / port
        Connection protocol and port, passed through to SmartConnect.
    mechanism
        Either ``userpass`` or ``sspi``.
    principal / domain
        Kerberos principal and domain; mandatory for the ``sspi`` mechanism.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Fixed: 'exc.message' does not exist on Python 3 (removed from
        # BaseException), which turned any TypeError here into an
        # AttributeError. Use six.text_type(exc) instead.
        if 'unexpected keyword argument' in six.text_type(exc):
            log.error('Initial connect to the VMware endpoint failed with %s',
                      six.text_type(exc))
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
        # Fixed: always re-raise. Previously a non-matching TypeError was
        # silently swallowed, causing an UnboundLocalError on
        # 'service_instance' further down.
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # Retry with certificate verification disabled.
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: retry on TLSv1 with verification disabled.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is torn down when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)
    customization_spec_name
        Name of the customization spec
    '''
    # Return the spec directly instead of rebinding the parameter name.
    return si.content.customizationSpecManager.GetCustomizationSpec(
        name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)
    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)
    obj_name
        Name of the object
    :return: The matching managed object reference, or None if not found
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
    finally:
        # Fixed: destroy the view so the server-side resources it holds are
        # released (get_content in this module does the same).
        container.Destroy()
    return None
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # GetSi() returns the service instance cached by pyVim.connect, if any,
    # so an existing authenticated session can be reused.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Session expired server-side: drop it and build a fresh one.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    # Extract the quoted session cookie value and publish it in the request
    # context so the new stub reuses the already-authenticated session.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    si = vim.ServiceInstance('ServiceInstance')
    # Reuse the managed object's SOAP stub so the ServiceInstance shares the
    # same authenticated connection.
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    # Map the two supported endpoint types to the boolean result.
    known_api_types = {'VirtualCenter': True, 'HostAgent': False}
    if api_type in known_api_types:
        return known_api_types[api_type]
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        # The AboutInfo object carries version/build/api details.
        return service_instance.content.about
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None if no switch with that name exists
    '''
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(
            inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        try:
            for item in container.view:
                if item.name == dvs_name:
                    return item
        finally:
            # Fixed: destroy the view so server-side resources are released
            # (previously leaked; get_content in this module does the same).
            container.Destroy()
    return None
def _get_pnics(host_reference):
    '''
    Helper function that returns a list of PhysicalNics and their information.
    '''
    network_info = host_reference.config.network
    return network_info.pnic
def _get_vnics(host_reference):
    '''
    Helper function that returns a list of VirtualNics and their information.
    '''
    network_info = host_reference.config.network
    return network_info.vnic
def _get_vnic_manager(host_reference):
    '''
    Helper function that returns a list of Virtual NicManagers
    and their information.
    '''
    config_manager = host_reference.configManager
    return config_manager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None if no portgroup matches
    '''
    return next((pg for pg in dvs.portgroup if pg.name == portgroup_name),
                None)
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None if no portgroup matches
    '''
    # NOTE(review): this is currently identical to _get_dvs_portgroup and does
    # not restrict the search to uplink portgroups -- confirm intended.
    for candidate in dvs.portgroup:
        if candidate.name == portgroup_name:
            return candidate
    return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    # NOTE(review): in_token is never reassigned inside the loop, so the
    # 'no response from server' branch fires on the first step that yields
    # no out_token -- confirm whether a server reply was meant to be fed
    # back in here.
    in_token = None
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            # Return the token base64-encoded; py3 needs bytes for b64encode.
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only direct ESXi (HostAgent) connections expose a single host's hardware.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is reported in bytes; convert to MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Collect per-vmkernel-NIC addressing information.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # Only join host and domain with a dot when a domain is set.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the reference to the container view when done.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    try:
        log.trace('Retrieving root folder')
        content = service_instance.RetrieveContent()
        return content.rootFolder
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view (only if we created it above) to release its
    # server-side resources
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Fetch every candidate with the requested property, then scan for the
    # first match on either the property value or the MO id string.
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        candidate_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == candidate_id:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # A stale HTTP connection can fail with BadStatusLine; retry once.
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        # Retry once on broken pipe; any other IOError is re-raised.
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)

    # Flatten each result's propSet into a dict, keeping the MO ref under
    # the 'object' key.
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in a single
    property-collector call.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        # This object type has no 'name' property; use a placeholder in logs.
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    entries = get_mors_with_properties(service_instance,
                                       type(mo_ref),
                                       container_ref=mo_ref,
                                       property_list=properties,
                                       local_properties=True)
    if not entries:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return entries[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object, or None when the object has no
    'name' property.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new network adapter device object for the given type name.

    adapter_type
        The adapter type name; one of 'vmxnet', 'vmxnet2', 'vmxnet3',
        'e1000', 'e1000e'.
    '''
    # Map type names to the corresponding pyVmomi device classes.
    factories = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type not in factories:
        raise ValueError('An unknown network adapter object type name.')
    return factories[adapter_type]()
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name for a given adapter device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.
    '''
    # Checked in this exact order so the more specific adapter classes are
    # matched before the more generic ones (presumably subclasses -- the
    # order mirrors the original isinstance chain).
    adapter_classes = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for device_cls, type_name in adapter_classes:
        if isinstance(adapter_object, device_cls):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns the distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs.
    child_traversal = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_traversal])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    folder_traversal = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=folder_traversal)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and blocks
    until the vCenter create task completes.

    Note: no value is returned; on failure an exception is raised (either
    here or by ``wait_for_task``).

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec named ``dvs_name``
        is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Build a default spec when the caller didn't supply one (or supplied
    # one without a configSpec).
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    # DVSs are created under the datacenter's network folder.
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the task finished; raises on task error.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the given config spec and
    waits for the reconfigure task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        message = ('Not enough permissions. Required privilege: '
                   '{0}'.format(exc.privilegeId))
        raise salt.exceptions.VMwareApiError(message)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    task_type = six.text_type(task.__class__)
    wait_for_task(task, dvs_name, task_type)
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether network I/O control (NIOC) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        message = ('Not enough permissions. Required privilege: '
                   '{0}'.format(exc.privilegeId))
        raise salt.exceptions.VMwareApiError(message)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Datacenter parent: traverse networkFolder -> childEntity.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # DVS parent: traverse its 'portgroup' property directly.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        # The uplink portgroup is identified by the system tag below.
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the create task to complete.

    dvs_ref
        The dvs reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec).
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        message = ('Not enough permissions. Required privilege: '
                   '{0}'.format(exc.privilegeId))
        raise salt.exceptions.VMwareApiError(message)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    task_type = six.text_type(task.__class__)
    wait_for_task(task, dvs_name, task_type)
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the reconfigure
    task to complete.

    portgroup_ref
        The portgroup reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec).
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log message typo: 'portgrouo' -> 'portgroup'
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        message = ('Not enough permissions. Required privilege: '
                   '{0}'.format(exc.privilegeId))
        raise salt.exceptions.VMwareApiError(message)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    task_type = six.text_type(task.__class__)
    wait_for_task(task, pg_name, task_type)
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicating whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to find networks.
    child_traversal = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_traversal])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Only the 'name' of each retrieved object is returned, regardless of
    # which extra properties were requested.
    return [item['name'] for item in
            get_mors_with_properties(service_instance, vim_object,
                                     properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        return service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        message = ('Not enough permissions. Required privilege: '
                   '{0}'.format(exc.privilegeId))
        raise salt.exceptions.VMwareApiError(message)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        message = ('Not enough permissions. Required privilege: '
                   '{0}'.format(exc.privilegeId))
        raise salt.exceptions.VMwareApiError(message)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        licenses = license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        message = ('Not enough permissions. Required privilege: '
                   '{0}'.format(exc.privilegeId))
        raise salt.exceptions.VMwareApiError(message)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return licenses
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The vSphere client displays this label as the license description.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        added_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        message = ('Not enough permissions. Required privilege: '
                   '{0}'.format(exc.privilegeId))
        raise salt.exceptions.VMwareApiError(message)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return added_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity_ref is not
    provided, the entity is assumed to be the vCenter itself, and the
    assignment's display name is later checked against ``entity_name``.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required (an ArgumentValueError is
        raised when missing).
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    # entity_type tracks how entity_id identifies the entity:
    # 'uuid' (vCenter instance UUID) or 'moid' (managed object id).
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # entity_name is guaranteed non-empty here (checked above), so the
        # vCenter display-name check below is always enabled in this branch.
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        # A concrete entity (e.g. host or cluster): use its managed object id.
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid) query is expected to yield exactly one assignment.
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter: identified by its instance UUID
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before re-raising, consistent with the error handling
            # elsewhere in this module.
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    datacenter_names = list_objects(service_instance, vim.Datacenter)
    return datacenter_names
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter, optionally filtered by name.

    service_instance
        The Service Instance Object from which to obtain datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object matching the given name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder and returns the
    created datacenter object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        datacenter = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        message = ('Not enough permissions. Required privilege: '
                   '{}'.format(exc.privilegeId))
        raise salt.exceptions.VMwareApiError(message)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return datacenter
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    # Clusters are created under the datacenter's host folder.
    host_folder = dc_ref.hostFolder
    try:
        host_folder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        message = ('Not enough permissions. Required privilege: '
                   '{}'.format(exc.privilegeId))
        raise salt.exceptions.VMwareApiError(message)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter and waits for the reconfigure task to
    complete.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True applies the spec incrementally instead of replacing
        # the whole configuration.
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        message = ('Not enough permissions. Required privilege: '
                   '{}'.format(exc.privilegeId))
        raise salt.exceptions.VMwareApiError(message)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    cluster_names = list_objects(service_instance, vim.ClusterComputeResource)
    return cluster_names
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster names associated with a given
    service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    datastore_cluster_names = list_objects(service_instance, vim.StoragePod)
    return datastore_cluster_names
def list_datastores(service_instance):
    '''
    Returns a list of datastore names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastore_names = list_objects(service_instance, vim.Datastore)
    return datastore_names
def list_datastores_full(service_instance):
    '''
    Returns a dict of datastore information keyed by datastore name,
    associated with a given service instance.
    Each value contains basic information about the datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # One extra API round-trip per datastore (see list_datastore_full).
    return {datastore: list_datastore_full(service_instance, datastore)
            for datastore in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Convert the reported sizes to MiB (the summary values are byte
    # counts per the vSphere API).
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against ZeroDivisionError for datastores that report zero
    # capacity (e.g. inaccessible/unmounted datastores).
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies as '<type>:<moid>'; extract the moid part.
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get a reference to the first object of the specified type with the given
    name, or None when no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    matches = (obj for obj in container.view if obj.name == obj_name)
    return next(matches, None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get a reference to the first object of the specified type with the given
    managed object id, or None when no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    matches = (obj for obj in container.view if obj._moId == obj_moid)
    return next(matches, None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    search_results = []
    datastore_objects = get_datastores(service_instance, container_object,
                                       datastore_names=datastores)
    for ds_obj in datastore_objects:
        datastore_path = '[{}] {}'.format(ds_obj.name, directory)
        try:
            task = ds_obj.browser.SearchDatastore_Task(
                datastorePath=datastore_path,
                searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            message = ('Not enough permissions. Required privilege: '
                       '{}'.format(exc.privilegeId))
            raise salt.exceptions.VMwareApiError(message)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            search_results.append(
                salt.utils.vmware.wait_for_task(
                    task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore with no matching files contributes no results.
            pass
    return search_results
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all datastores visible from the
        reference. Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
    if backing_disk_ids and not isinstance(reference, vim.HostSystem):
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\' when backing disk filter '
            'is set'.format(reference.__class__.__name__))
    # Work on a copy so the backing-disk filter below never mutates the
    # caller's datastore_names list (the original code extended it in place)
    if datastore_names:
        datastore_names = list(datastore_names)
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Pick a traversal spec matching the reference type; the default spec
    # doesn't reach datastores from all of these containers
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    current_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", current_name,
              new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns the storage system of an ESXi host.

    service_instance
        The Service Instance Object from which to obtain the storage system.

    host_ref
        The vim.HostSystem whose storage system is retrieved.

    hostname
        Name of the host; looked up from host_ref when not provided.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return entries[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The vim.HostStorageSystem of the host owning the disk

    device_path
        The device path of the disk whose partition info is retrieved
    '''
    try:
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Only a single device path was queried, so the first result is the one
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem of the host owning the disk

    device_path
        The device path of the disk to be partitioned

    partition_info
        The vim.HostDiskPartitionInfo describing the disk's current layout
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # log.trace takes %-style lazy args; a '{0}' placeholder would be
    # logged literally, so use %s here
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDisk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved from host_ref when not
        provided. This argument is optional.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute the spec for a new vmfs partition covering the disk's free space
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns the datastore system of an ESXi host.

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return entries[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The removal is performed through the datastore
    system of the first host the datastore is attached to.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Use the first attached host's datastore system to do the removal
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # The parent property is needed to check cluster membership;
            # cluster existence only makes sense with a datacenter set
            properties.append('parent')
    else:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    selected_hosts = []
    for entry in hosts:
        # When a cluster filter is set, skip hosts outside that cluster
        if cluster_name:
            parent = entry['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or entry['name'] in host_names:
            selected_hosts.append(entry['object'])
    return selected_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host: map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns the list of all scsi luns on an ESXi host (the host's
    storageDeviceInfo.scsiLun entries), or an empty list if none were found.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        si, host_ref, storage_system, hostname)
    # Index the host's luns by their key so the scsi-address map can be
    # resolved to lun objects
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    lun_by_scsi_addr = {}
    for scsi_addr, lun_key in six.iteritems(lun_key_by_scsi_addr):
        lun_by_scsi_addr[scsi_addr] = lun_by_key[lun_key]
    return lun_by_scsi_addr
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    matched_disks = []
    for lun in get_all_luns(host_ref, storage_system):
        if not isinstance(lun, vim.HostScsiDisk):
            continue
        # Keep the disk when no filter applies, or when it matches either
        # the canonical-name filter or the scsi-address derived key filter
        if (get_all_disks or
                (disk_ids and lun.canonicalName in disk_ids) or
                lun.key in disk_keys):
            matched_disks.append(lun)
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in matched_disks])
    return matched_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Narrow the lun list down to the disk with the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk attached to an ESXi host.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Narrow the lun list down to the disk with the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their canonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its single cache (ssd) disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids,
                      expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the host cache configuration (the first entry of the cache
    manager's cacheConfigurationInfo) if the host cache is configured on the
    specified host, otherwise returns None.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Traverse from the host to configManager.cacheConfigurationManager
        # to fetch its cacheConfigurationInfo property
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host. Returns True on success.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the configuration task completes (raises on task failure)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts (vim.HostSystem) associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; when True all resource pools in the container are returned

    return
        List of vim.ResourcePool managed object references

    Raises salt.exceptions.VMwareObjectRetrievalError when no resource pool
    matches the requested names.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the requested names; previously the (always empty at this
        # point) selected_pools list was formatted into the message
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Return every resource pool visible to the given service instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Return every network visible to the given service instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Return every virtual machine visible to the given service instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Return every folder visible to the given service instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Return every distributed virtual switch visible to the given service
    instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Return every vApp visible to the given service instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Return every distributed virtual portgroup visible to the given service
    instance.

    service_instance
        The Service Instance Object used to query the inventory.
    '''
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    Polls ``task.info`` until the task leaves the ``running``/``queued``
    states, then returns the task result on success or re-raises the task's
    error translated into a salt VMware exception.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.
        NOTE(review): the loop below sleeps roughly one second per iteration
        regardless; ``sleep_seconds`` only throttles how often the "Waiting"
        message is logged (every ``sleep_seconds`` iterations).

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Reading ``task.info`` is a server round-trip and may fault; translate
    # pyVmomi faults into salt exceptions.
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Log only every ``sleep_seconds`` iterations to limit noise.
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep up to the next whole-second boundary since ``start_time`` so
        # ``time_counter`` tracks elapsed wall-clock seconds.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; raise the stored fault so it can be
        # mapped onto the matching salt exception below.
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first detailed fault message when available.
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises ``VMwareObjectRetrievalError`` when no VM matches and
    ``VMwareMultipleObjectsError`` when the name is ambiguous.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # The previous implicit string concatenation produced
        # "...with thesame name..."; keep the words separated.
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises ``VMwareObjectRetrievalError`` when no folder can be determined
    and ``VMwareMultipleObjectsError`` when the placement folder name is
    ambiguous.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Clone case: place the new VM in the same folder as the base VM.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this fell through to the return statement and raised an
        # UnboundLocalError; fail with an explicit error instead.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The destination folder could not be determined: no base virtual '
            'machine, placement folder or datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies

    Raises ``VMwareObjectRetrievalError`` when the placement target cannot
    be resolved and ``VMwareMultipleObjectsError`` when it is ambiguous.
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if placement is None:
        # The default used to crash with a TypeError on the membership
        # tests below; an undefined placement is reported via the final
        # else branch instead.
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose no resourcePool property directly;
            # traverse host -> parent cluster -> resourcePool instead.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # The previous message read placement['host'], which does not
            # exist in this branch and raised a KeyError.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a long integer.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    normalized_unit = unit.lower()
    if normalized_unit == 'gb':
        # vCenter needs long value
        kb_value = int(size * 1024 * 1024)
    elif normalized_unit == 'mb':
        kb_value = int(size * 1024)
    elif normalized_unit == 'kb':
        kb_value = int(size)
    else:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': kb_value, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    # Reject unknown actions up front instead of duplicating the fault
    # handling for each branch.
    if action not in ('on', 'off'):
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        if action == 'on':
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        else:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will ne placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Build the optional arguments once instead of duplicating the call.
    task_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        task_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **task_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host
        Placement host of the virtual machine, vim.HostSystem object
    '''
    # Assemble the call arguments once; the host is optional.
    task_kwargs = {'path': vmx_path,
                   'name': name,
                   'asTemplate': False,
                   'pool': resourcepool_object}
    if host_object:
        task_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**task_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(reconfig_task, name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine

    vm_ref
        Managed object reference of a virtual machine object
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory; unlike
    ``delete_vm`` this does not destroy the machine's files.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # The docstring and trace message used to say 'Destroying', copied from
    # delete_vm; this function only unregisters.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before raising, consistent with the other fault handlers in
        # this module.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
create_cluster
|
python
|
def create_cluster(dc_ref, cluster_name, cluster_spec):
'''
Creates a cluster in a datacenter.
dc_ref
The parent datacenter reference.
cluster_name
The cluster name.
cluster_spec
The cluster spec (vim.ClusterConfigSpecEx).
Defaults to None.
'''
dc_name = get_managed_object_name(dc_ref)
log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
cluster_name, dc_name)
try:
dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
Creates a cluster in a datacenter.
dc_ref
The parent datacenter reference.
cluster_name
The cluster name.
cluster_spec
The cluster spec (vim.ClusterConfigSpecEx).
Defaults to None.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1849-L1878
|
[
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary (``cmd.run_all`` result), or ``False`` when the
             ``esxcli`` binary is not on the PATH
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting through vCenter: -h selects the target ESXi host.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    # NOTE(review): the password is interpolated into the command line;
    # output_loglevel='quiet' keeps it out of the logs, but it remains
    # visible in the process table while the command runs.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        Location of the vCenter server or ESX/ESXi host.

    username / password
        Credentials; both mandatory for the ``userpass`` mechanism.

    protocol / port
        Connection protocol and TCP port.

    mechanism
        Either ``userpass`` or ``sspi``.

    principal / domain
        Kerberos service principal and user domain; both mandatory for the
        ``sspi`` mechanism.

    Raises ``CommandExecutionError`` for missing/unsupported login
    parameters and ``VMwareConnectionError`` when the connection fails.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Python 3 exceptions have no ``.message`` attribute (the previous
        # code raised AttributeError here); six.text_type works on both
        # Python 2 and 3.
        if 'unexpected keyword argument' in six.text_type(exc):
            log.error('Initial connect to the VMware endpoint failed with %s',
                      six.text_type(exc))
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # Retry with an unverified SSL context when the failure is a
            # certificate verification error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: retry over TLSv1 without certificate checks.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is closed when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of customizing a clone

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Use a dedicated name for the result instead of re-using the parameter.
    spec_ref = si.content.customizationSpecManager.GetCustomizationSpec(
        name=customization_spec_name)
    return spec_ref
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Return the first matching item, or None when nothing matches.
    return next((item for item in container.view if item.name == obj_name),
                None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    Re-uses a cached pyVim session when one exists for the same host,
    reconnecting when the cached session is stale, belongs to a different
    host, or the caller runs inside a proxy minion.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # GetSi() returns the process-wide cached session, if any.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and authenticate from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    ssl_context = None
    if sys.version_info[:3] > (2, 7, 8):
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE

    existing_stub = service_instance._stub
    host_name = existing_stub.host.split(':')[0]
    session_cookie = existing_stub.cookie.split('"')[1]
    # Propagate the current vCenter session to the new stub.
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=host_name,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=ssl_context)
    new_stub.cookie = existing_stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a ServiceInstance that shares the managed object's stub, so it
    # talks to the same endpoint over the same session.
    service_instance = vim.ServiceInstance('ServiceInstance')
    service_instance._stub = mo_ref._stub
    return service_instance
def disconnect(service_instance):
    '''
    Closes the connection to a vCenter Server or ESXi host.

    service_instance
        The Service Instance whose session should be terminated.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Returns True when the service instance is connected to a vCenter Server
    and False when it is connected directly to an ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' identifies a vCenter, 'HostAgent' a standalone ESXi.
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns the 'about' information of the vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain the information.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no DVS with that name exists
    '''
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(
            inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        try:
            for item in container.view:
                if item.name == dvs_name:
                    return item
        finally:
            # Destroy the server-side container view so it isn't leaked;
            # views are not cleaned up automatically (see get_content, which
            # destroys the views it creates).
            container.Destroy()
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for a Kerberos connection. Returns the token
    base64-encoded, or raises CommandExecutionError when no token could be
    obtained.

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    # Service principal of the form 'principal/host@DOMAIN'.
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # Perform one step of the GSSAPI handshake; any produced token is
        # returned base64-encoded (as bytes-safe output on PY3).
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): in_token is never assigned a server response, so a
        # second handshake step can never receive one — the multi-round-trip
        # path appears unreachable. Confirm this is the intended behavior.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Hardware grains are only gathered for direct ESXi (HostAgent)
    # connections; vCenter connections return an empty dict.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # System/BIOS identification. view.view[0] is the (single) host.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            # "OS" grains are populated from the ESXi product information.
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; converted here to MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            # Network grains: one entry per VMkernel NIC (vmkX), keyed by
            # device name, mirroring the layout of standard minion grains.
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is 'host.domain', or just 'host' when no domain is set.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the local reference to the container view.
        # NOTE(review): the server-side view is not Destroy()ed here —
        # confirm whether this should call view.Destroy() like get_content.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (the retrieved service content) of a Service
    Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    return service_instance.RetrieveContent()
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    log.trace('Retrieving root folder')
    try:
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    # Tracks whether a container view was created here (and must be
    # destroyed before returning).
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    # (all=True retrieves every property when no explicit list was given).
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    # (skip=True excludes the starting object itself from the results
    # unless local properties were requested).
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view (only when it was created by this function)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match, or the stringified managed
        object id.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Fetch all managed objects of the given type with the single property
    # that is matched against.
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # Also accept the stringified MOR id, with surrounding quotes removed.
        candidate_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if property_value in (candidate[property_name], candidate_id):
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list of dicts, one per retrieved managed object: each maps the
    requested property names to their values and stores the managed object
    reference itself under the ``object`` key.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    args = [service_instance, object_type]
    kwargs = {'property_list': property_list,
              'container_ref': container_ref,
              'traversal_spec': traversal_spec,
              'local_properties': local_properties}
    # Retry once on stale-connection errors (bad HTTP status line or a
    # broken pipe); any other IOError is propagated.
    try:
        content = get_content(*args, **kwargs)
    except BadStatusLine:
        content = get_content(*args, **kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*args, **kwargs)

    object_list = []
    for obj in content:
        entry = {prop.name: prop.val for prop in obj.propSet}
        entry['object'] = obj.obj
        object_list.append(entry)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved optimally
    via the property collector.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # The name is retrieved first, purely for logging/error messages.
    try:
        mo_name = get_mors_with_properties(service_instance,
                                           type(mo_ref),
                                           container_ref=mo_ref,
                                           property_list=['name'],
                                           local_properties=True)[0]['name']
    except vmodl.query.InvalidProperty:
        # Not every managed object type exposes a 'name' property.
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    results = get_mors_with_properties(service_instance,
                                       type(mo_ref),
                                       container_ref=mo_ref,
                                       property_list=properties,
                                       local_properties=True)
    if not results:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return results[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object, or None when the retrieved
    properties don't include a name.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device object for the given adapter
    type name.

    adapter_type
        The adapter type name ('vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or
        'e1000e') from which to obtain the network adapter type.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    try:
        return adapter_classes[adapter_type]()
    except KeyError:
        raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name for a virtual adapter device
    object.

    adapter_object
        The adapter object from which to obtain the network adapter type.
    '''
    # Ordering matters: the more specific device classes are checked before
    # vim.vm.device.VirtualVmxnet / VirtualE1000, matching the original
    # isinstance chain.
    type_names = ((vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
                  (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
                  (vim.vm.device.VirtualVmxnet, 'vmxnet'),
                  (vim.vm.device.VirtualE1000e, 'e1000e'),
                  (vim.vm.device.VirtualE1000, 'e1000'))
    for adapter_class, type_name in type_names:
        if isinstance(adapter_object, adapter_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    entries = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualSwitch,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    return [entry['object'] for entry in entries
            if get_all_dvss or (dvs_names and entry['name'] in dvs_names)]
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete.

    Note: this function does not return the new DVS reference; retrieve it
    afterwards (e.g. with ``get_dvss``) if it is needed.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Build a minimal create spec when none was supplied. The dvs_name is
    # only assigned when the configSpec is created here; a caller-supplied
    # configSpec is expected to already carry the name.
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the create task completes (raises on task failure).
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the given config spec and
    waits for the reconfiguration task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether network I/O control (NIOC) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # datacenter -> networkFolder -> childEntity
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # parent is a distributed virtual switch: dvs -> portgroup
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    entries = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualPortgroup,
                                       container_ref=parent_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    return [entry['object'] for entry in entries
            if get_all_portgroups or
            (portgroup_names and entry['name'] in portgroup_names)]
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        # Uplink portgroups carry the system tag 'SYSTEM/DVS.UPLINKPG'.
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in log message ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # datacenter -> networkFolder -> childEntity
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    entries = get_mors_with_properties(service_instance,
                                       vim.Network,
                                       container_ref=parent_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    return [entry['object'] for entry in entries
            if get_all_networks or
            (network_names and entry['name'] in network_names)]
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Each entry returned by get_mors_with_properties is a dict of the
    # requested properties; only the 'name' value is surfaced here.
    return [item['name']
            for item in get_mors_with_properties(service_instance,
                                                 vim_object,
                                                 properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        return service_instance.content.licenseManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    if not manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Returns the added vim.LicenseManagerLicenseInfo object.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    label = vim.KeyValue()
    # NOTE(review): 'VpxClientLicenseLabel' appears to be the label key the
    # vSphere client displays for the license description — confirm
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Returns a list of the assigned vim.LicenseManagerLicenseInfo objects.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            # Verify later that the assignment is indeed for this vCenter
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # A vCenter (uuid) query is expected to yield exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')

    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.

    Returns the vim.LicenseManagerLicenseInfo of the assigned license.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before re-raising, consistent with the module's other
            # fault handlers
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Lists the datacenters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    datacenters = list_objects(service_instance, vim.Datacenter)
    return datacenters
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    found = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        # Keep everything when get_all_datacenters is set, otherwise keep
        # only the datacenters explicitly requested by name
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            found.append(entry['object'])
    return found
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns the vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name

    Raises VMwareObjectRetrievalError if no such datacenter exists.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name

    Returns the created vim.Datacenter object.
    '''
    # Datacenters are always created under the inventory root folder
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference (vim.Datacenter).

    cluster
        The name of the cluster to be retrieved.

    Raises VMwareObjectRetrievalError if the cluster is not found in the
    datacenter.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's hostFolder; traverse into the
    # folder's children to reach them
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.

    Blocks until the reconfiguration task completes.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True applies the spec incrementally rather than replacing
        # the entire cluster configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Lists the clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    clusters = list_objects(service_instance, vim.ClusterComputeResource)
    return clusters
def list_datastore_clusters(service_instance):
    '''
    Lists the datastore clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    datastore_clusters = list_objects(service_instance, vim.StoragePod)
    return datastore_clusters
def list_datastores(service_instance):
    '''
    Lists the datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastores = list_objects(service_instance, vim.Datastore)
    return datastores
def list_datastores_full(service_instance):
    '''
    Returns a mapping of datastore to its basic information for every
    datastore of a given service instance. The per-datastore information
    contains: name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {ds: list_datastore_full(service_instance, ds)
            for ds in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    Capacity/free/used are reported in MiB; usage is a percentage.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )

    summary = datastore_object.summary
    items = {}
    items['name'] = str(summary.name).replace("'", "")
    items['type'] = str(summary.type).replace("'", "")
    items['url'] = str(summary.url).replace("'", "")
    items['capacity'] = summary.capacity / 1024 / 1024
    items['free'] = summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # An inaccessible datastore can report a capacity of 0; guard against
    # ZeroDivisionError when computing the usage percentage
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key renders as "'vim.HostSystem:host-123'"; strip the quotes
        # and keep only the moid part after the colon
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)

    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the managed object reference, or None if no object with that
    name exists.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Destroy the server-side view so it is not leaked on the
        # vCenter/ESXi host
        container.Destroy()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object

    Returns the managed object reference, or None if no object with that
    moid exists.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Destroy the server-side view so it is not leaked on the
        # vCenter/ESXi host
        container.Destroy()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            # Call the module-local wait_for_task directly (consistent with
            # the rest of this module) instead of going through the
            # salt.utils.vmware self-import
            files.append(wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore without the searched directory is not an error;
            # it simply contributes no results
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.
        The caller's list is never modified.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # Build a new list instead of extending in place so the
            # caller's datastore_names argument is not mutated
            datastore_names = datastore_names + disk_datastores
        else:
            datastore_names = disk_datastores

    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []

    log.trace('datastore_names = %s', datastore_names)

    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object from which to obtain the storage system.

    host_ref
        The vim.HostSystem whose storage system is retrieved.

    hostname
        Name of the host, used in error messages/logging. Retrieved from
        host_ref if not provided.

    Raises VMwareObjectRetrievalError if the storage system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # The storage system hangs off the host's configManager
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition informations for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The vim.HostStorageSystem used to query the partition info.

    device_path
        The device path to retrieve the partition info for.
    '''
    try:
        # RetrieveDiskPartitionInfo takes a list of paths; we query a single
        # device and return its (single) result
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem used to compute the partition info.

    device_path
        The device path of the disk.

    partition_info
        The disk's current vim.HostDiskPartitionInfo.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use a %s placeholder: logging's lazy formatting is %-style, so a
    # str.format-style '{0}' would never be interpolated
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved if not provided.

    Returns the created vim.Datastore reference.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    # Compute a partition spec that consumes the remaining free space on
    # the disk
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises VMwareObjectRetrievalError if the datastore system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # The datastore system hangs off the host's configManager
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The removal is performed via the datastore system
    of the first host the datastore is attached to.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises VMwareApiError if the datastore has no attached hosts or the
    removal fails.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host can perform the removal; use the first one
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                # Standalone host; cannot be a member of the cluster
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue

        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue

        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises VMwareObjectRetrievalError if the storage device info, multipath
    info or luns cannot be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    Returns an empty list if the host exposes no scsi luns.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # First map: scsi address -> lun key
    lun_ids_to_scsi_addr_map = \
        _get_scsi_address_to_lun_key_map(si, host_ref, storage_system,
                                         hostname)
    # Second map: lun key -> vim.ScsiLun object
    luns_to_key_map = {d.key: d for d in
                       get_all_luns(host_ref, storage_system, hostname)}
    # Join the two maps: scsi address -> vim.ScsiLun object
    return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in
            six.iteritems(lun_ids_to_scsi_addr_map)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in an ESXi host, filtered by their canonical names and scsi addresses.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Nothing to filter by, so nothing can match.
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    # Keep only disk-type luns that match either filter (or everything when
    # get_all_disks is set).
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk (a vim.HostDiskPartitionInfo object).

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Narrow the lun list to the vim.HostScsiDisk with the requested
    # canonical name.
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk of an ESXi host.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # Retrieve the host's luns through the property collector (rather than
    # through storage_system directly) so the list matches this host only.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing the disk
    groups in an ESXi host, filtered by the canonical names of their cache
    disks.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # Without cache disk ids there is nothing to match.
        if not cache_disk_ids:
            return []
    try:
        # May fault when the property is fetched from the server.
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # Each mapping's ssd attribute is its (unique) cache disk.
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that a disk group's cache disk and capacity disks match the
    expected canonical names; raises ArgumentValueError on any mismatch and
    returns True on success.
    '''
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    # Compare the capacity disk sets order-independently.
    actual_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_ids = sorted(capacity_disk_ids)
    if actual_ids != expected_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_ids, expected_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the host cache configuration info if the host cache is configured
    on the specified host, otherwise returns None.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Traverse from the host to its cache configuration manager.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first datastore's cache config is returned (see TODO above)
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host; returns True on success.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache
        will be configured on.

    swap_size_MiB
        The size in mebibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the configuration task completes (raises on task error).
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    # Delegate to the generic inventory lister for the host type.
    managed_type = vim.HostSystem
    return list_objects(service_instance, managed_type)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects.

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; when True all resource pools are returned regardless of name

    return
        List of resource pool managed object references

    Raises VMwareObjectRetrievalError when no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Scope the search to the datacenter if one was given, otherwise search
    # the whole inventory starting at the root folder.
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Bug fix: the message previously interpolated ``selected_pools``,
        # which is always [] here; report the requested names instead.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    # Delegate to the generic inventory lister for the resource pool type.
    managed_type = vim.ResourcePool
    return list_objects(service_instance, managed_type)
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    # Delegate to the generic inventory lister for the network type.
    managed_type = vim.Network
    return list_objects(service_instance, managed_type)
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    # Delegate to the generic inventory lister for the virtual machine type.
    managed_type = vim.VirtualMachine
    return list_objects(service_instance, managed_type)
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    # Delegate to the generic inventory lister for the folder type.
    managed_type = vim.Folder
    return list_objects(service_instance, managed_type)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    # Delegate to the generic inventory lister for the DVS type.
    managed_type = vim.DistributedVirtualSwitch
    return list_objects(service_instance, managed_type)
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    # Delegate to the generic inventory lister for the vApp type.
    managed_type = vim.VirtualApp
    return list_objects(service_instance, managed_type)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    # Delegate to the generic inventory lister for the portgroup type.
    managed_type = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, managed_type)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a vCenter/ESXi task to be completed and returns its result;
    re-raises the task's fault translated to a salt VMware exception on
    failure.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        # Fetching task.info round-trips to the server and can fault.
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll roughly once per second until the task leaves running/queued.
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary relative to start_time.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state
        # Re-raise the stored fault so it can be translated below.
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError when no VM matches and
    VMwareMultipleObjectsError when more than one matches.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set covering identity, storage, guest and runtime.
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Bug fix: the two fragments were implicitly concatenated into a
        # single list element, producing 'with thesame name'; a comma makes
        # ' '.join insert the missing space.
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises VMwareObjectRetrievalError when no folder can be determined from
    the given arguments.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Clone case: reuse the parent folder of the base virtual machine.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif placement and 'folder' in placement:
        # Robustness: guard against placement=None (previously a TypeError).
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this fell through and raised UnboundLocalError on
        # folder_object; raise an explicit, catchable error instead.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'A folder could not be determined from the base vm name, '
            'placement or datacenter arguments')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object used to access the vCenter

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        # Strictest placement: a specific host; use its resource pool.
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The host does not expose 'resourcePool' directly; walk up to
            # the parent compute resource and take its pool instead.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Bug fix: the previous message interpolated placement['host'],
            # which cannot exist in this branch (the 'host' key was already
            # ruled out), so a KeyError masked the intended exception.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit; returns a dict with the
    converted integer size (``'size'``) and the unit, always ``'KB'``
    (``'unit'``).

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    Raises ArgumentValueError for a missing or unsupported unit.
    '''
    normalized_unit = unit.lower() if unit else ''
    if normalized_unit == 'gb':
        # vCenter needs long value
        target_size = int(size * 1024 * 1024)
    elif normalized_unit == 'mb':
        target_size = int(size * 1024)
    elif normalized_unit == 'kb':
        target_size = int(size)
    else:
        # Bug fix: the old message ('The unit is not specified') was
        # misleading when a unit *was* given but unsupported; include the
        # offending value in the message.
        raise salt.exceptions.ArgumentValueError(
            'Invalid unit specified: \'{0}\''.format(unit))
    return {'size': target_size, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    # Map the requested action to the pyVmomi method name and the label used
    # when waiting on the resulting task.
    dispatch = {'on': ('PowerOn', 'power on'),
                'off': ('PowerOff', 'power off')}
    if action not in dispatch:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    method_name, task_name = dispatch[action]
    try:
        task = getattr(virtual_machine, method_name)()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    try:
        # Only pass the host argument when a valid host reference was given.
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # VM creation can take a while; poll every 10s and log at info level.
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)
    '''
    try:
        # asTemplate=False registers a regular VM, not a template.
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # A missing vmx file surfaces as FileNotFound from the task.
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object; returns
    the reconfigure task's result.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine (removes it from the inventory and deletes
    its files from disk).

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters a virtual machine from the inventory; the virtual machine
    files are left on the datastore (use ``delete_vm`` to also remove them).

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed: the previous trace message said 'Destroying', which is wrong for
    # an unregister operation.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Consistency: every sibling function logs the fault before
        # translating it; this one previously did not.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
update_cluster
|
python
|
def update_cluster(cluster_ref, cluster_spec):
    '''
    Applies a new configuration to an existing cluster and waits for the
    resulting reconfiguration task to finish.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', name)
    try:
        reconfig_task = cluster_ref.ReconfigureComputeResource_Task(
            cluster_spec, modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        privilege_msg = ('Not enough permissions. Required privilege: '
                         '{}'.format(exc.privilegeId))
        raise salt.exceptions.VMwareApiError(privilege_msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(reconfig_task, name, 'ClusterUpdateTask')
|
Updates a cluster in a datacenter.
cluster_ref
The cluster reference.
cluster_spec
The cluster spec (vim.ClusterConfigSpecEx).
Defaults to None.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1881-L1908
|
[
"def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):\n '''\n Waits for a task to be completed.\n\n task\n The task to wait for.\n\n instance_name\n The name of the ESXi host, vCenter Server, or Virtual Machine that\n the task is being run on.\n\n task_type\n The type of task being performed. Useful information for debugging purposes.\n\n sleep_seconds\n The number of seconds to wait before querying the task again.\n Defaults to ``1`` second.\n\n log_level\n The level at which to log task information. Default is ``debug``,\n but ``info`` is also supported.\n '''\n time_counter = 0\n start_time = time.time()\n log.trace('task = %s, task_type = %s', task, task.__class__.__name__)\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n while task_info.state == 'running' or task_info.state == 'queued':\n if time_counter % sleep_seconds == 0:\n msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n time.sleep(1.0 - ((time.time() - start_time) % 1.0))\n time_counter += 1\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. 
Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n if task_info.state == 'success':\n msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n # task is in a successful state\n return task_info.result\n else:\n # task is in an error state\n try:\n raise task_info.error\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.fault.SystemError as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareSystemError(exc.msg)\n except vmodl.fault.InvalidArgument as exc:\n log.exception(exc)\n exc_message = exc.msg\n if exc.faultMessage:\n exc_message = '{0} ({1})'.format(exc_message,\n exc.faultMessage[0].message)\n raise salt.exceptions.VMwareApiError(exc_message)\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Module loader gate: only load this utility module when PyVmomi is
    importable.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    # SECURITY NOTE: the password is interpolated into a shell command line;
    # a password containing a single quote will break the quoting, and the
    # password is visible in the host's process list while esxcli runs.
    # Output is captured at 'quiet' loglevel below to avoid logging it.
    if not esxi_host:
        # Connecting directly to an ESXi server: 'host' points at that server
        # and is also the ESXi instance we are manipulating.
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting through a vCenter ('host'); '-h esxi_host' selects the
        # managed ESXi machine the command actually runs against.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        Endpoint to connect to.
    username
        Login user; mandatory when ``mechanism`` is ``userpass``.
    password
        Login password; mandatory when ``mechanism`` is ``userpass``.
    protocol
        Connection protocol, passed straight through to ``SmartConnect``.
    port
        Connection port, passed straight through to ``SmartConnect``.
    mechanism
        Either ``userpass`` or ``sspi``.
    principal
        Kerberos service principal; mandatory when ``mechanism`` is ``sspi``.
    domain
        Kerberos user domain; mandatory when ``mechanism`` is ``sspi``.
    '''
    log.trace('Retrieving new service instance')
    token = None
    # Validate the mechanism-specific mandatory parameters before attempting
    # the connection.
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                # Kerberos login: obtain a base64 GSSAPI token to hand to
                # SmartConnect as 'b64token'.
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # NOTE(review): 'exc.message' only exists on Python 2; on Python 3
        # this handler would itself raise AttributeError.  Also, if the
        # message does NOT match, the TypeError is swallowed and
        # 'service_instance' is left unbound, causing a NameError at the
        # atexit.register call below -- likely a latent bug; confirm.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First fallback: if the failure was an SSL certificate
            # verification error, retry with verification disabled.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Second fallback: explicit TLSv1 context with verification
                # turned off entirely.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is cleanly disconnected at interpreter exit.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Look up a VMware customization spec by name, for customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the first matching managed object reference, or None.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        # Scan the flattened view for the first object with a matching name.
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Fixed: destroy the server-side view so it is not leaked on the
        # endpoint (same cleanup get_content performs for its views).
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''

    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Try to reuse pyVim's process-wide cached service instance before
    # opening a new connection.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    # (CurrentTime() is a cheap liveness probe; a stale session raises
    # NotAuthenticated, in which case we reconnect once).
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    stub = service_instance._stub
    # Strip any ':port' suffix from the existing stub's host.
    hostname = stub.host.split(':')[0]
    # Extract the quoted cookie value from the existing stub's session
    # cookie header (the value between the double quotes).
    session_cookie = stub.cookie.split('"')[1]
    # Propagate the current session into the request context so the new stub
    # reuses the authenticated session (the full cookie is copied below too).
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only for log messages.
        This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    si = vim.ServiceInstance('ServiceInstance')
    # Reuse the managed object's SOAP stub so the new ServiceInstance talks
    # to the same endpoint/session as the managed object.
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Closes the session to the vCenter server or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        privilege_msg = ('Not enough permissions. Required privilege: '
                         '{}'.format(exc.privilegeId))
        raise salt.exceptions.VMwareApiError(privilege_msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Tells vCenter Server connections (True) apart from ESXi host
    connections (False), based on the endpoint's reported apiType.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # Map each known endpoint type to the boolean result.
    known_types = {'VirtualCenter': True, 'HostAgent': False}
    if api_type not in known_types:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
    return known_types[api_type]
def get_service_info(service_instance):
    '''
    Returns the 'about' information of the vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        about_info = service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return about_info
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None if no DVS has that name
    '''
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        try:
            for item in container.view:
                if item.name == dvs_name:
                    return item
        finally:
            # Fixed: destroy the server-side view so it is not leaked on the
            # endpoint (same cleanup get_content performs for its views).
            container.Destroy()
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    Returns the token base64-encoded (bytes); raises CommandExecutionError
    when no token could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    # Service principal in the 'service/host@DOMAIN' form expected by GSSAPI.
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    # NOTE(review): 'in_token' is never reassigned, so this loop performs at
    # most one step(); it either returns a token or raises below -- confirm
    # whether a multi-step handshake was intended.
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only direct ESXi connections (apiType 'HostAgent') expose host hardware;
    # for vCenter connections an empty dict is returned.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # All grains are read from the first (only) HostSystem in the view.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; convert to MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Per-vmkernel-NIC addressing information.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is 'host.domain' when a domain is set, otherwise just 'host'.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # MAC addresses for the physical NICs as well.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # NOTE(review): the container view is only dropped by rebinding here,
        # not destroyed server-side -- confirm whether Destroy() is needed.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Retrieve and return the inventory (service content) of a Service
    Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    try:
        log.trace('Retrieving root folder')
        content = service_instance.RetrieveContent()
        return content.rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        privilege_msg = ('Not enough permissions. Required privilege: '
                         '{}'.format(exc.privilegeId))
        raise salt.exceptions.VMwareApiError(privilege_msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view (destroyed again at the end
        # of this function)
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view (only if we created it above ourselves)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference whose property matches the
    given value (or whose stringified reference id equals the value).

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Fetch every candidate with just the one property we need to compare.
    candidates = get_mors_with_properties(
        service_instance, object_type,
        property_list=[property_name], container_ref=container_ref)
    for candidate in candidates:
        # Also match against the bare reference id (quotes stripped).
        mor_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if property_value in (candidate[property_name], mor_id):
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list of dicts, one per retrieved managed object. Each dict maps
    the requested property names to their values and stores the managed
    object reference itself under the ``'object'`` key.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The managed object type to retrieve.

    property_list
        Optional list of property names to fetch for each object.

    container_ref
        Optional managed object to search under (Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem). Defaults to the
        inventory rootFolder.

    traversal_spec
        Optional TraversalSpec used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Whether the properties to retrieve are local to the container.
        If so, the traversal spec needs to be None.
    '''
    fetch_args = [service_instance, object_type]
    fetch_kwargs = {'property_list': property_list,
                    'container_ref': container_ref,
                    'traversal_spec': traversal_spec,
                    'local_properties': local_properties}
    try:
        content = get_content(*fetch_args, **fetch_kwargs)
    except BadStatusLine:
        # Transient connection hiccup; retry the retrieval once.
        content = get_content(*fetch_args, **fetch_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        # Broken pipe: also worth a single retry.
        content = get_content(*fetch_args, **fetch_kwargs)

    results = []
    for retrieved in content:
        entry = dict((prop.name, prop.val) for prop in retrieved.propSet)
        entry['object'] = retrieved.obj
        results.append(entry)
    log.trace('Retrieved %s objects', len(results))
    return results
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns the requested properties of a managed object as a dict.

    mo_ref
        The managed object reference.

    properties
        List of property names of the managed object to retrieve.

    Raises VMwareApiError if the properties couldn't be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # Look up the object's name first so failure messages can reference it.
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        # Some managed object types don't expose a 'name' property.
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    prop_entries = get_mors_with_properties(service_instance,
                                            type(mo_ref),
                                            container_ref=mo_ref,
                                            property_list=properties,
                                            local_properties=True)
    if not prop_entries:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return prop_entries[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object, or None when it has no name.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Returns a new virtual network adapter device matching the given
    type name.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or 'e1000e'.

    Raises ValueError for an unknown adapter type name.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    adapter_cls = adapter_classes.get(adapter_type)
    if adapter_cls is None:
        raise ValueError('An unknown network adapter object type name.')
    # A fresh device instance is returned on every call.
    return adapter_cls()
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the type name of a virtual network adapter device instance.

    adapter_object
        The adapter device from which to obtain the network adapter type.

    Raises ValueError for an unknown adapter object.
    '''
    # Order matters: more specific classes (e.g. VirtualVmxnet2/3, which
    # derive from VirtualVmxnet) must be checked before their base classes.
    type_table = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for adapter_cls, type_name in type_table:
        if isinstance(adapter_object, adapter_cls):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns the distributed virtual switches (DVSs) of a datacenter,
    optionally filtered by name.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # DVSs live under the datacenter's network folder, so traverse
    # networkFolder -> childEntity instead of using the default spec.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Returns the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises VMwareObjectRetrievalError when the folder can't be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits
    for the creation task to complete.

    NOTE(review): despite earlier documentation, the created DVS reference
    is NOT returned; the task result is discarded after the wait.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        If None, an empty spec is built. The spec's name is only set when
        a fresh configSpec is created here; a caller-supplied configSpec
        is used as-is.
        Default is None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    # The DVS is created under the datacenter's network folder.
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Applies a config spec to an existing distributed virtual switch and
    waits for the reconfiguration task to finish.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied
        to the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enables or disables network resource management (NIOC) on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Boolean flag; True turns NIOC on, False turns it off.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns the distributed virtual portgroups (dvportgroups) under a
    datacenter or a distributed virtual switch, optionally filtered by name.

    parent_ref
        The parent object reference: a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises ArgumentValueError for an unsupported parent type.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Portgroups of a datacenter live under its network folder.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # Parent is a distributed virtual switch.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference.

    Raises VMwareObjectRetrievalError when no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup carries the SYSTEM/DVS.UPLINKPG tag.
    uplinks = [
        entry['object'] for entry in
        get_mors_with_properties(service_instance,
                                 vim.DistributedVirtualPortgroup,
                                 container_ref=dvs_ref,
                                 property_list=['tag'],
                                 traversal_spec=traversal_spec)
        if entry['tag'] and
        any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in entry['tag'])]
    if not uplinks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplinks[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual
    switch (dvs) and waits for the creation task to finish.

    dvs_ref
        The dvs reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec).
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to finish.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Typo fix: the log message previously read 'Updating portgrouo %s'.
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Destroys a distributed virtual portgroup and waits for the removal
    task to finish.

    portgroup_ref
        The portgroup reference.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns standard-switch networks of a datacenter, optionally filtered
    by name.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The names of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicating whether to return all networks in the parent.
        Default is False.

    Raises ArgumentValueError when the parent is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks hang off the datacenter's network folder.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns the names of all objects of a given type on a service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The managed object type for which to obtain information
        (e.g. ``vim.Datacenter``). The docstring previously documented
        a non-existent ``object_type`` parameter.

    properties
        An optional list of object properties to retrieve.
        If not provided, defaults to ``['name']``. Note that only each
        object's ``name`` is included in the returned list regardless
        of any extra properties requested.
    '''
    if properties is None:
        properties = ['name']
    return [item['name']
            for item in get_mors_with_properties(service_instance,
                                                 vim_object,
                                                 properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager of a service instance.

    service_instance
        The Service Instance Object from which to obtain the license
        manager.
    '''
    log.debug('Retrieving license manager')
    try:
        manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager of a service instance.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises VMwareObjectRetrievalError when the manager is unavailable.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    if not assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses installed on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not
        provided it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license to the instance and returns the added license object.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add; attached as the label
        shown by the vSphere client.

    license_manager
        The License Manager object of the service instance. If not
        provided it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        added_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return added_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity_ref is not
    provided, the entity is assumed to be the vCenter itself, and the
    returned assignment is validated against entity_name.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging and, for the vCenter case, to validate
        the returned assignment. Required.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ArgumentValueError when entity_name is missing, and
    VMwareObjectRetrievalError on unexpected assignment results.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # entity_name is guaranteed truthy at this point (checked above),
        # so the redundant inner 'if entity_name' was removed; the vCenter
        # assignment is always validated against it.
        check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    if entity_type == 'uuid' and len(assignments) > 1:
        # Typo fix: message previously read 'Unexpectectedly'.
        log.trace('Unexpectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')

    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license object.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before translating the fault, consistent with the error
            # handling everywhere else in this module (was missing here).
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns the names of all datacenters on a service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns datacenter objects of a vCenter, optionally filtered by name.

    service_instance
        The Service Instance Object from which to obtain datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns the vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name.

    Raises VMwareObjectRetrievalError when the datacenter is not found.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder and returns it.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object.

    datacenter_name
        The datacenter name.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        datacenter = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return datacenter
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster of a datacenter by name.

    dc_ref
        The datacenter reference.

    cluster
        The name of the cluster to be retrieved.

    Raises VMwareObjectRetrievalError when the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's host folder.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    matches = [entry['object'] for entry in
               get_mors_with_properties(si,
                                        vim.ClusterComputeResource,
                                        container_ref=dc_ref,
                                        property_list=['name'],
                                        traversal_spec=traversal_spec)
               if entry['name'] == cluster]
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def list_clusters(service_instance):
    '''
    Returns the names of all clusters on a service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns the names of all datastore clusters on a service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns the names of all datastores on a service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dict mapping each datastore name to its basic information:
    name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return dict(
        (ds_name, list_datastore_full(service_instance, ds_name))
        for ds_name in list_objects(service_instance, vim.Datastore))
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts.
    Sizes are reported in MiB; usage is a percentage of capacity.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError when the datastore doesn't exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against ZeroDivisionError: an unmounted/inaccessible datastore
    # can report a capacity of 0.
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies like 'vim.HostSystem:host-123'; keep the moid.
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Returns the first managed object of the given type whose name matches,
    or None if there is no match.

    si
        ServiceInstance for the vSphere or ESXi server (see
        get_service_instance).

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc).

    obj_name
        Name of the object.
    '''
    inventory = get_inventory(si)
    view = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Stop at the first match, just like an explicit loop with a return.
    return next((item for item in view.view if item.name == obj_name), None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Returns the first managed object of the given type whose moid matches,
    or None if there is no match.

    si
        ServiceInstance for the vSphere or ESXi server (see
        get_service_instance).

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc).

    obj_moid
        ID of the object.
    '''
    inventory = get_inventory(si)
    view = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Stop at the first match, just like an explicit loop with a return.
    return next((item for item in view.view if item._moId == obj_moid), None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Searches the given datastores for files matching a browser spec.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search.

    datastores
        Names of the datastores to search.

    container_object
        The base object for searches.

    browser_spec
        BrowserSpec object which defines the search criteria.

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    results = []
    for ds_obj in get_datastores(service_instance, container_object,
                                 datastore_names=datastores):
        try:
            task = ds_obj.browser.SearchDatastore_Task(
                datastorePath='[{}] {}'.format(ds_obj.name, directory),
                searchSpec=browser_spec)
        except vim.fault.NoPermission as err:
            log.exception(err)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(err.privilegeId))
        except vim.fault.VimFault as err:
            log.exception(err)
            raise salt.exceptions.VMwareApiError(err.msg)
        except vmodl.RuntimeFault as err:
            log.exception(err)
            raise salt.exceptions.VMwareRuntimeError(err.msg)
        try:
            results.append(salt.utils.vmware.wait_for_task(
                task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A missing directory on this datastore is not an error; skip it.
            pass
    return results
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all datastores visible from the
        reference. Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        # Backing-disk filtering needs the host's storage system, so it is
        # only supported when the reference is a host.
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Datastore names resolved from disks are merged into the name filter.
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Build a traversal spec appropriate to the reference type; as noted
    # below, the default traversal does not reach datastores from hosts.
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=properties,
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    # Final filter by name (names may have been augmented by the disk filter).
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    current_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'",
              current_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system

    service_instance
        The Service Instance Object used to query the host.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host. Retrieved from ``host_ref`` if not provided.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host object to its configManager.storageSystem.
    traversal = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.HostSystem,
        path='configManager.storageSystem',
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return entries[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Return the partition information (vim.HostDiskPartitionInfo) for a single
    device path on the given storage system.
    '''
    try:
        infos = storage_system.RetrieveDiskPartitionInfo(
            devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Only one device path was passed in, so only the first entry matters.
    log.trace('partition_info = %s', infos[0])
    return infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem of the host that owns the disk.

    device_path
        Path of the device on which the new partition is computed.

    partition_info
        vim.HostDiskPartitionInfo describing the disk's current layout.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed: the logging module performs printf-style ('%s') lazy
    # interpolation; the previous '{0}' placeholder was never substituted,
    # so the computed partition info was not actually logged.
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDisk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved from the host if not
        provided. This argument is optional.

    return
        The vim.Datastore reference of the newly created datastore.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    # Compute a partition spec that fills the remaining free space on the disk
    # (see _get_new_computed_partition_spec).
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host object to its configManager.datastoreSystem.
    traversal = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.HostSystem,
        path='configManager.datastoreSystem',
        skip=False)
    found = get_mors_with_properties(service_instance,
                                     vim.HostDatastoreSystem,
                                     property_list=['datastore'],
                                     container_ref=host_ref,
                                     traversal_spec=traversal)
    if not found:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return found[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The previous docstring ("Creates a VMFS datastore
    from a disk_id") was copied from create_vmfs_datastore by mistake.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Removal is issued through the datastore system of the first attached
    # host.
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    props = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
    else:
        # No datacenter given: search from the inventory root folder.
        start_point = get_root_folder(service_instance)
    if cluster_name:
        # The parent property is needed to verify cluster membership below.
        props.append('parent')

    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=props)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    selected = []
    for host in hosts:
        if cluster_name:
            parent = host['parent']
            # Only hosts directly parented by the named cluster qualify.
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host['name'] in host_names:
            selected.append(host['object'])
    return selected
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Builds a map from scsi address to lun key for all luns on an ESXi host:

        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for lun in multipath_info.lun:
        for path in lun.path:
            # The vmware scsi_address may have multiple comma separated
            # values; the first one is the actual scsi address.
            lun_key_by_scsi_addr[path.name.split(',')[0]] = lun.lun
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns all vim.HostScsiLun objects on an ESXi host.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    if not storage_system:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    luns = device_info.scsiLun
    if not luns:
        log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
        return []
    log.trace('Retrieved scsi luns in host \'%s\': %s',
              hostname, [lun.canonicalName for lun in luns])
    return luns
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        si, host_ref, storage_system, hostname)
    # Index the luns by key so they can be joined to the scsi addresses.
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    return {scsi_addr: lun_by_key[lun_key]
            for scsi_addr, lun_key in six.iteritems(key_by_scsi_addr)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # Convert the scsi addresses to disk keys so both filters can be
        # applied in a single pass over the luns.
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
            si, host_ref, storage_system, hostname)
        disk_keys = [key for addr, key in six.iteritems(lun_key_by_scsi_addr)
                     if addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    matching_disks = []
    for lun in get_all_luns(host_ref, storage_system):
        if not isinstance(lun, vim.HostScsiDisk):
            continue
        if get_all_disks or \
                (disk_ids and lun.canonicalName in disk_ids) or \
                lun.key in disk_keys:
            matching_disks.append(lun)
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in matching_disks])
    return matching_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    scsi_luns = props.get('storageDeviceInfo.scsiLun')
    if not scsi_luns:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(scsi_luns),
        ', '.join([l.canonicalName for l in scsi_luns])
    )
    matching_disks = [l for l in scsi_luns
                      if isinstance(l, vim.HostScsiDisk) and
                      l.canonicalName == disk_id]
    if not matching_disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    device_path = matching_disks[0].devicePath
    log.trace('[%s] device_path = %s', hostname, device_path)
    partition_info = _get_partition_info(storage_system, device_path)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    # Retrieve the host's scsi luns to locate the target disk and its
    # device path.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by the canonical name of its (single)
    # cache disk.
    selected = []
    for mapping in vsan_disk_mappings:
        if get_all_disk_groups or mapping.ssd.canonicalName in cache_disk_ids:
            selected.append(mapping)
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [m.ssd.canonicalName for m in selected]
    )
    return selected
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    # Compare capacity disks order-insensitively.
    actual_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_ids = sorted(capacity_disk_ids)
    if actual_ids != expected_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_ids, expected_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if host_cache_manager:
        # A cache manager was supplied: read its config info directly.
        props = get_properties_of_managed_object(host_cache_manager,
                                                 ['cacheConfigurationInfo'])
        if not props:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return props['cacheConfigurationInfo'][0]
    # No cache manager supplied: traverse from the host to its
    # configManager.cacheConfigurationManager.
    traversal = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.cacheConfigurationManager',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostCacheConfigurationManager,
                                       ['cacheConfigurationInfo'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal)
    if not results or not results[0].get('cacheConfigurationInfo'):
        log.trace('Host \'%s\' has no host cache', hostname)
        return None
    return results[0]['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore opject representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        host_cache_manager = props.get(
            'configManager.cacheConfigurationManager')
        if not host_cache_manager:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    cache_spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', cache_spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(cache_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    List every ESXi host (vim.HostSystem) visible from a service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    host_refs = list_objects(service_instance, vim.HostSystem)
    return host_refs
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean

    return
        Resourcepool managed object reference
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Fixed: report the requested names in the error. Previously the
        # (always empty at this point) selected_pools list was formatted
        # into the message, hiding what was actually searched for.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Return the names of all resource pools visible to the given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    pool_names = list_objects(service_instance, vim.ResourcePool)
    return pool_names
def list_networks(service_instance):
    '''
    Return the names of all networks visible to the given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    network_names = list_objects(service_instance, vim.Network)
    return network_names
def list_vms(service_instance):
    '''
    Return the names of all virtual machines visible to the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vm_names = list_objects(service_instance, vim.VirtualMachine)
    return vm_names
def list_folders(service_instance):
    '''
    Return the names of all folders visible to the given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    folder_names = list_objects(service_instance, vim.Folder)
    return folder_names
def list_dvs(service_instance):
    '''
    Return the names of all distributed virtual switches visible to the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    dvs_names = list_objects(service_instance, vim.DistributedVirtualSwitch)
    return dvs_names
def list_vapps(service_instance):
    '''
    Return the names of all vApps visible to the given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vapp_names = list_objects(service_instance, vim.VirtualApp)
    return vapp_names
def list_portgroups(service_instance):
    '''
    Return the names of all distributed virtual portgroups visible to the
    given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    portgroup_names = list_objects(service_instance,
                                   vim.dvs.DistributedVirtualPortgroup)
    return portgroup_names
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    Blocks until the task leaves the ``running``/``queued`` states, then
    returns the task result on success or re-raises the task's error mapped
    to a salt VMware exception on failure.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    # Handlers are ordered from specific fault types to the generic
    # vmodl.RuntimeFault fallback; do not reorder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only log every `sleep_seconds` iterations to keep the log readable
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary relative to start_time
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state: raise the server-reported fault locally
        # so it can be translated into the matching salt exception type
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first localized fault message when one is provided
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    raise
        VMwareObjectRetrievalError when no VM matches;
        VMwareMultipleObjectsError when more than one VM matches.
    '''
    if datacenter and not parent_ref:
        # Narrow the search scope to the given datacenter
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # BUG FIX: the two literals below were implicitly concatenated
        # (missing comma), producing "...with thesame name..."
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary; the 'folder' key, when present, selects the
        folder by name

    base_vm_name
        Existing virtual machine name (for cloning); when given, the parent
        folder of that VM is returned and `placement`/`datacenter` are ignored

    NOTE(review): if `base_vm_name` is falsy, `placement` has no 'folder'
    key, and `datacenter` is falsy, no branch assigns `folder_object` and
    the final return raises NameError — confirm whether callers guarantee
    at least one of these inputs.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Cloning case: reuse the folder the base VM lives in
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vmFolder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Tuple of (resource pool object, placement object); the placement
        object is the host or cluster the pool was resolved from
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The host does not expose resourcePool directly; traverse to the
            # parent compute resource's pool instead
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # BUG FIX: previously formatted placement['host'], which may not
            # exist in this branch (KeyError) and named the wrong entity
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Convert the given size to KB.

    unit
        Unit of the size: ``GB``, ``MB`` or ``KB`` (case-insensitive);
        Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    return
        Dictionary with the converted integer size and the unit, e.g.
        ``{'size': 1048576, 'unit': 'KB'}``.
        (BUG FIX: the old docstring claimed a bare long integer was
        returned, but a dict has always been returned.)

    raise
        ArgumentValueError when the unit is not recognized.
    '''
    # Multipliers to KB; vCenter needs an integer (long) value
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    multiplier = multipliers.get(unit.lower())
    if multiplier is None:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': int(size * multiplier), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine and waits for the operation to complete.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine; one of 'on' or 'off'

    return
        The same virtual machine object, after the task has finished

    raise
        ArgumentValueError for an unsupported action;
        VMwareApiError/VMwareRuntimeError on API faults;
        VMwarePowerOnError when the task reports a missing file.
    '''
    if action == 'on':
        try:
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    elif action == 'off':
        try:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # NOTE: VMwarePowerOnError is raised even for the 'off' action
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created (used only for task logging; the
        actual name comes from the config spec)

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    try:
        # Only pass the host when a valid HostSystem is supplied; otherwise
        # let vCenter choose placement within the resource pool
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Longer sleep interval and info-level logging because VM creation can
    # take a while
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)
    '''
    try:
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # Translate a missing vmx file into a registration-specific error
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update

    return
        The result of the reconfigure task

    raise
        VMwareApiError/VMwareRuntimeError on API faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # NOTE: the return value is the task result, not the original vm_ref
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine

    vm_ref
        Managed object reference of a virtual machine object

    raise
        VMwareApiError/VMwareRuntimeError on API faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the destroy task finishes
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory.

    (BUG FIX: the previous docstring and log message were copy-pasted from
    ``delete_vm`` and claimed the VM was being destroyed; ``UnregisterVM``
    only removes it from the inventory.)

    vm_ref
        Managed object reference of a virtual machine object

    raise
        VMwareApiError/VMwareRuntimeError on API faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before re-raising, consistent with the other API wrappers
        # in this module (was previously missing here)
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
list_datastores_full
|
python
|
def list_datastores_full(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.
    The list contains basic information about the datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
|
Returns a list of datastores associated with a given service instance.
The list contains basic information about the datastore:
name, type, url, capacity, free, used, usage, hosts
service_instance
The Service Instance Object from which to obtain datastores.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1941-L1956
|
[
"def list_objects(service_instance, vim_object, properties=None):\n '''\n Returns a simple list of objects from a given service instance.\n\n service_instance\n The Service Instance for which to obtain a list of objects.\n\n object_type\n The type of content for which to obtain information.\n\n properties\n An optional list of object properties used to return reference results.\n If not provided, defaults to ``name``.\n '''\n if properties is None:\n properties = ['name']\n\n items = []\n item_list = get_mors_with_properties(service_instance, vim_object, properties)\n for item in item_list:\n items.append(item['name'])\n return items\n",
"def list_datastore_full(service_instance, datastore):\n '''\n Returns a dictionary with the basic information for the given datastore:\n name, type, url, capacity, free, used, usage, hosts\n\n service_instance\n The Service Instance Object from which to obtain datastores.\n\n datastore\n Name of the datastore.\n '''\n datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)\n\n if not datastore_object:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Datastore \\'{0}\\' does not exist.'.format(datastore)\n )\n\n items = {}\n items['name'] = str(datastore_object.summary.name).replace(\"'\", \"\")\n items['type'] = str(datastore_object.summary.type).replace(\"'\", \"\")\n items['url'] = str(datastore_object.summary.url).replace(\"'\", \"\")\n items['capacity'] = datastore_object.summary.capacity / 1024 / 1024\n items['free'] = datastore_object.summary.freeSpace / 1024 / 1024\n items['used'] = items['capacity'] - items['free']\n items['usage'] = (float(items['used']) / float(items['capacity'])) * 100\n items['hosts'] = []\n\n for host in datastore_object.host:\n host_key = str(host.key).replace(\"'\", \"\").split(\":\", 1)[1]\n host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)\n items['hosts'].append(host_object.name)\n\n return items\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: Dictionary (output of cmdmod.run_all), or False when the
             esxcli binary is not installed
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    # NOTE(review): pwd/credstore/cmd are interpolated into a shell string;
    # values containing single quotes will break the quoting — consider
    # passing an argument list to cmdmod instead
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Tries a normal SmartConnect first, then falls back to progressively
    weaker SSL verification when certificate verification fails.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # BUG FIX: Python 3 exceptions have no '.message' attribute; fall
        # back to str(exc) so this handler does not raise AttributeError
        message = getattr(exc, 'message', six.text_type(exc))
        if 'unexpected keyword argument' in message:
            log.error('Initial connect to the VMware endpoint failed with %s', message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
        # BUG FIX: always re-raise; previously a TypeError with a different
        # message was swallowed, leading to a NameError on service_instance
        # at the atexit.register call below
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                    '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
                    '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # Retry with an unverified SSL context
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: explicit TLSv1 context with verification off
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the connection is cleanly closed on interpreter exit
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of customizing a clone

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Look the spec up by name through the customization spec manager
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    return
        The first matching managed object reference, or None
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # BUG FIX: container views are server-side objects; destroy the view
        # so it is not leaked on the vCenter/ESXi side
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    Reuses a cached connection when possible and transparently reconnects
    when the cached session has gone stale.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # GetSi() returns the process-wide cached connection, if any
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and authenticate again
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a SOAP stub pointing at a different service path, reusing the
    authenticated session of an existing connection.

    service_instance
        The Service Instance whose connection is reused.

    path
        Path of the new stub.

    ns
        Namespace of the new stub. Default value is None.

    version
        Version of the new stub. Default value is None.
    '''
    # Python 2.7.9+ ships a stricter default SSL context; relax hostname
    # checking and client-side certificate verification for this stub.
    ssl_context = None
    if sys.version_info[:3] > (2, 7, 8):
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE
    current_stub = service_instance._stub
    vc_hostname = current_stub.host.split(':')[0]
    session_cookie = current_stub.cookie.split('"')[1]
    # Propagate the live vCenter session so the new stub is authenticated.
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=vc_hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=ssl_context)
    new_stub.cookie = current_stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Builds a service instance that shares the connection (stub) of a
    managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of the managed object, used only for logging. This field is
        optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    service_instance = vim.ServiceInstance('ServiceInstance')
    # Reuse the stub of the managed object so the returned service instance
    # runs over the same authenticated session.
    service_instance._stub = mo_ref._stub
    return service_instance
def disconnect(service_instance):
    '''
    Closes the connection to a vCenter server or ESXi host, translating
    pyVmomi faults into salt exceptions.

    service_instance
        The Service Instance to disconnect from.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Tells whether the connection is made to a vCenter Server (returns True)
    or to an ESXi host (returns False).

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    # Map the two supported api types to their boolean answer.
    known_api_types = {'VirtualCenter': True, 'HostAgent': False}
    if api_type in known_api_types:
        return known_api_types[api_type]
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns the ``about`` information of the vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object, or None if
    no DVS with that name exists.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object
    '''
    # Bail out early when the name is not among the known switches.
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    for candidate in container.view:
        if candidate.name == dvs_name:
            return candidate
    return None
def _get_pnics(host_reference):
    '''
    Helper function that returns the list of PhysicalNics of a host.
    '''
    network_config = host_reference.config.network
    return network_config.pnic
def _get_vnics(host_reference):
    '''
    Helper function that returns the list of VirtualNics of a host.
    '''
    network_config = host_reference.config.network
    return network_config.vnic
def _get_vnic_manager(host_reference):
    '''
    Helper function that returns the virtualNicManager of a host.
    '''
    config_manager = host_reference.configManager
    return config_manager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
    '''
    Return the portgroup with the given name on the dvs, or None when no
    portgroup by that name exists.

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object
    '''
    return next(
        (pg for pg in dvs.portgroup if pg.name == portgroup_name),
        None)
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return the first portgroup matching ``portgroup_name`` on the dvs, or
    None when no portgroup by that name exists.

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object
    '''
    matches = [pg for pg in dvs.portgroup if pg.name == portgroup_name]
    return matches[0] if matches else None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for a Kerberos connection.

    Returns the base64-encoded token produced by the first GSSAPI
    negotiation step; raises ``CommandExecutionError`` if no token is
    produced.

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    # Kerberos service principal of the form service/host@REALM.
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # NOTE(review): in_token is never reassigned, so each iteration steps
        # the context with None; a negotiation needing more than one round
        # trip hits the error below -- confirm this single-step flow is
        # intended.
        out_token = ctx.step(in_token)
        if out_token:
            # Return the token as base64; PY3 needs an explicit bytes
            # conversion first.
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    # Reached when the context establishes without yielding a token.
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance
    is a HostAgent type.

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Hardware details are only collected when connected directly to an ESXi
    # host (apiType == 'HostAgent'); a vCenter connection yields an empty dict.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # System, BIOS, CPU and OS information of the first (and, for a
            # HostAgent connection, only) host in the view.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grain is reported in MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            # Network interface grains, keyed by vnic/pnic device name.
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host.domain; the dot is omitted when no domain is set.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the reference to the container view.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (service content) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of the inventory of a vCenter or ESXi host.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service
    Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties used to return even more
        filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour
        is to search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local
        to the container. If that is the case, the traversal spec needs
        to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the
    # filter is the container_ref passed in the function
    obj_ref = container_ref
    # Tracks whether we created a container view that must be destroyed
    # after retrieval.
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    # (all properties when no explicit property_list is given)
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content; the starting object itself is
    # skipped unless its local properties were requested
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view we created above, if any
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property
    value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property used to select the managed object
        reference.

    property_name
        An object property used to return the specified object reference
        results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.
    '''
    # Fetch every object of the requested type along with the property of
    # interest, then return the first match.
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # The stringified MOR id (quotes stripped) also counts as a match.
        candidate_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == candidate_id:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list of dicts, one per managed object, containing the
    requested properties plus the object reference itself under the
    ``object`` key.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more
        filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    # Retrieve the content, retrying once on transient connection errors
    # (stale HTTP connection / broken pipe).
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        content = get_content(*content_args, **content_kwargs)
    except IOError as err:
        if err.errno != errno.EPIPE:
            raise err
        content = get_content(*content_args, **content_kwargs)
    object_list = []
    for obj in content:
        entry = {prop.name: prop.val for prop in obj.propSet}
        entry['object'] = obj.obj
        object_list.append(entry)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns the requested properties of a managed object, retrieved in a
    single call.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        # Not every managed object type exposes a 'name' property; fall back
        # to a placeholder used only for logging/errors.
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    entries = get_mors_with_properties(service_instance,
                                       type(mo_ref),
                                       container_ref=mo_ref,
                                       property_list=properties,
                                       local_properties=True)
    if not entries:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return entries[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object, or None if the name wasn't found.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new network adapter device object for the given adapter type
    name.

    adapter_type
        The adapter type name; one of 'vmxnet', 'vmxnet2', 'vmxnet3',
        'e1000' or 'e1000e'.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name of a network adapter device
    object.

    adapter_object
        The adapter object from which to obtain the network adapter type.
    '''
    # Check order preserved from the original chain: the vmxnet2/vmxnet3 and
    # e1000e variants are tested before VirtualVmxnet/VirtualE1000
    # (presumably their base classes) so the most specific type wins.
    type_checks = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for adapter_class, type_name in type_checks:
        if isinstance(adapter_object, adapter_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns the distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # DVSs live under the datacenter's network folder; traverse into the
    # folder and then into its children.
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits
    for the creation task to finish.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a default spec carrying ``dvs_name``
        is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    spec = dvs_create_spec or vim.DVSCreateSpec()
    if not spec.configSpec:
        # No config spec supplied: build one naming the new switch.
        spec.configSpec = vim.VMwareDVSConfigSpec()
        spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Applies a config spec to an existing distributed virtual switch and
    waits for the reconfigure task to finish.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enables or disables network I/O control (NIOC) on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Traverse through the datacenter's network folder and its children.
        child_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[child_spec])
    else:
        # Parent is a distributed virtual switch; walk its portgroups.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        # Uplink portgroups carry the system tag 'SYSTEM/DVS.UPLINKPG'.
        if entry['tag'] and any(t.key == 'SYSTEM/DVS.UPLINKPG'
                                for t in entry['tag']):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to finish.

    dvs_ref
        The dvs reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec).
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the reconfigure
    task to finish.

    portgroup_ref
        The portgroup reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec) to apply.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log message typo: 'portgrouo' -> 'portgroup'.
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to finish.

    portgroup_ref
        The portgroup reference.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches under a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The names of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicating whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks live under the datacenter's network folder; traverse into it
    # and then into its children.
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_spec])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Build the name list with a comprehension instead of a manual append
    # loop.
    return [item['name']
            for item in get_mors_with_properties(service_instance,
                                                 vim_object,
                                                 properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager of a vCenter or ESXi host.

    service_instance
        The Service Instance Object from which to obtain the license
        manager.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        assignment_manager = (
            service_instance.content.licenseManager.licenseAssignmentManager)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # The attribute may legitimately be unset on some endpoints.
    if not assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        # Returns the raw list of vim.LicenseManagerLicenseInfo objects
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Returns the vim license object returned by ``AddLicense``.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The label is what the vSphere client displays next to the license key
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # A vCenter (uuid lookup) must have exactly one license assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        # Fixed typo in log message ('Unexpectectedly' -> 'Unexpectedly')
        log.trace('Unexpectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')

    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log the fault before raising, consistent with the rest of
            # the module's error handling
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    entries = get_mors_with_properties(service_instance,
                                       vim.Datacenter,
                                       property_list=['name'])
    datacenters = []
    for entry in entries:
        # Keep everything when get_all_datacenters, otherwise filter by name
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if matches:
        return matches[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object used to reach the vCenter.

    datacenter_name
        The name of the datacenter to create.

    Returns the newly created vim.Datacenter object.
    '''
    # Datacenters can only be created directly under the root folder
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved

    Raises VMwareObjectRetrievalError if no cluster with that name exists in
    the datacenter.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's hostFolder; traverse the folder's
    # children instead of the datacenter itself
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        # Clusters are created under the datacenter's hostFolder
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing configuration instead
        # of replacing it
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfiguration task completes (or raises)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster (storage pod) names associated with a
    given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastore names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    # str() may yield quoted values on some pyVmomi types; strip the quotes
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Sizes converted from bytes to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # NOTE(review): raises ZeroDivisionError if the reported capacity is 0 --
    # assumed not to happen for a provisioned datastore; confirm
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key looks like 'vim.HostSystem:host-123'; keep the moid part
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the first match, or None if no object with that name exists.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Return the first object whose name matches, or None
    return next((obj for obj in container.view if obj.name == obj_name), None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object

    Returns the first match, or None if no object with that moid exists.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Return the first object whose managed object id matches, or None
    return next((obj for obj in container.view if obj._moId == obj_moid), None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Search path in '[datastore] directory' datastore-path format
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore without the directory is simply skipped
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:

            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # Build a new list instead of extending in place so the caller's
            # datastore_names argument is not mutated as a side effect
            datastore_names = datastore_names + disk_datastores
        else:
            datastore_names = disk_datastores

    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []

    log.trace('datastore_names = %s', datastore_names)

    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used to query the host.

    host_ref
        The vim.HostSystem whose storage system is retrieved.

    hostname
        Name of the host, used in error messages/logs. Retrieved from
        host_ref if not provided.

    Raises VMwareObjectRetrievalError if the storage system is not found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host directly to its configManager.storageSystem
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition informations for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The vim.HostStorageSystem used to query the disk.

    device_path
        The device path of the disk whose partition info is retrieved.
    '''
    try:
        # RetrieveDiskPartitionInfo takes a list of paths; we query one
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem used to compute the partition layout.

    device_path
        The device path of the disk on which the partition is added.

    partition_info
        The current vim.HostDiskPartitionInfo of the disk.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed log placeholder: logging uses %-style lazy formatting, so the
    # previous '{0}' placeholder was never interpolated
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved if not provided.

    Returns the created vim.Datastore reference.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    # Compute a partition spec that fills the free space at the end of the disk
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises VMwareObjectRetrievalError if the datastore system is not found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host directly to its configManager.datastoreSystem
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes (unmounts) a datastore via one of its attached hosts.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises VMwareApiError if the datastore has no attached hosts.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host can perform the removal; use the first one
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            # Skip hosts that are not members of the requested cluster
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None (retrieved if not given).

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        # No storage system given: look it up via the host's service instance
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    # Translate pyVmomi faults into salt exceptions
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    # Host has no luns; an empty list (not an error) is the contract here
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref,
                                                                name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # First map scsi addresses to lun keys, then resolve each key to its
    # vim.ScsiLun object
    scsi_addr_to_lun_key = _get_scsi_address_to_lun_key_map(
        service_instance, host_ref, storage_system, hostname)
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    scsi_addr_to_lun = {}
    for scsi_addr, lun_key in six.iteritems(scsi_addr_to_lun_key):
        scsi_addr_to_lun[scsi_addr] = lun_by_key[lun_key]
    return scsi_addr_to_lun
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Nothing to filter by and not fetching everything: short-circuit
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None (retrieved if not
        given).
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Narrow all scsi luns down to the single disk with the requested id
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk

    in a vcenter filtered by their names and/or datacenter, cluster membership

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # No cache disks given and not fetching everything: nothing to do
        if not cache_disk_ids:
            return []
    # Translate pyVmomi faults into salt exceptions
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (single) cache disk (ssd)
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # Compare capacity disks order-insensitively
    actual_capacity_ids = sorted(
        [disk.canonicalName for disk in disk_group.nonSsd])
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids,
                      expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Traverse from the host to its cache configuration manager
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in mebibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    # Translate pyVmomi faults into salt exceptions
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    host_type = vim.HostSystem
    return list_objects(service_instance, host_type)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean

    return
        Resourcepool managed object reference
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Scope the search to the datacenter if one was given, otherwise search
    # from the inventory root
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the *requested* names; the previous code formatted the
        # (always empty) result list here, making the error useless
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))

    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    pool_type = vim.ResourcePool
    return list_objects(service_instance, pool_type)
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    network_type = vim.Network
    return list_objects(service_instance, network_type)
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vm_type = vim.VirtualMachine
    return list_objects(service_instance, vm_type)
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    folder_type = vim.Folder
    return list_objects(service_instance, folder_type)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    dvs_type = vim.DistributedVirtualSwitch
    return list_objects(service_instance, dvs_type)
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vapp_type = vim.VirtualApp
    return list_objects(service_instance, vapp_type)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    portgroup_type = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, portgroup_type)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only emit a progress message every sleep_seconds iterations
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Align polls to ~1-second wall-clock boundaries from start_time
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the embedded fault so it can
        # be translated into the matching salt exception below
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.
    '''
    if datacenter and not parent_ref:
        # Restrict the search to the given datacenter
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set returned for each VM
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Names are only unique per container; ambiguity must be resolved
        # by the caller via parent_ref/datacenter
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the'
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises a VMwareObjectRetrievalError if no folder can be determined from
    the given arguments.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Clone case: use the parent folder of the base VM
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this path fell through to an UnboundLocalError on
        # folder_object; raise an explicit, descriptive error instead
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The folder could not be determined: no base virtual machine, '
            'placement folder or datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    # Guard against the documented default: 'host' in None raises TypeError
    if placement is None:
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose resourcePool directly; clustered hosts
            # require traversing to the cluster's resource pool
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Previously formatted placement['host'], which raises KeyError
            # in this branch; report the resource pool that was requested
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit; returns a dict with the
    converted integer size and the unit 'KB'.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    # Multipliers from each supported unit to KB (binary units)
    kb_multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    normalized_unit = unit.lower()
    if normalized_unit not in kb_multipliers:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    # vCenter needs long value
    converted = int(size * kb_multipliers[normalized_unit])
    return {'size': converted, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine; either 'on' or 'off'
    '''
    if action == 'on':
        try:
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    elif action == 'off':
        try:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # Power-on can fail when a backing file (e.g. the vmx) is missing
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will ne placed (optional)

    return
        Virtual Machine managed object reference
    '''
    try:
        # Only pass the host when an actual HostSystem was supplied
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)
    '''
    try:
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # Registration fails with FileNotFound when the vmx path is wrong
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', name)
    # Translate pyVmomi faults into salt exceptions
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(reconfig_task, name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine (via the vSphere ``Destroy_Task``) and
    waits for the destroy task to complete.

    vm_ref
        Managed object reference of a virtual machine object

    Raises ``VMwareApiError`` on API faults (including insufficient
    permissions) and ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    # Translate pyVmomi faults into salt exceptions; NoPermission must be
    # handled before the more general VimFault.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the destroy task as finished.
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters a virtual machine from the inventory.

    Unlike ``delete_vm``, this does not destroy the virtual machine's
    files; it only removes the VM from the inventory (vSphere
    ``UnregisterVM``).

    vm_ref
        Managed object reference of a virtual machine object

    Raises ``VMwareApiError`` on API faults (including insufficient
    permissions) and ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    # The previous message said 'Destroying', copied from delete_vm; this
    # helper only unregisters.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, consistent with the other VM helpers in
        # this module (delete_vm, update_vm, ...).
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
list_datastore_full
|
python
|
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    ``capacity``, ``free`` and ``used`` are in MiB (assuming
    ``summary.capacity``/``summary.freeSpace`` are reported in bytes, per
    the vSphere API); ``usage`` is the used percentage of capacity.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises ``VMwareObjectRetrievalError`` when no datastore with that name
    exists.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    # The replace() calls strip stray quotes from the stringified pyVmomi
    # values.
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # NOTE(review): raises ZeroDivisionError when the reported capacity is
    # 0 -- confirm whether vCenter can report such datastores.
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key presumably stringifies as "'vim.HostSystem:host-123'";
        # keep only the moid after the colon to resolve the host object.
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
|
Returns a dictionary with the basic information for the given datastore:
name, type, url, capacity, free, used, usage, hosts
service_instance
The Service Instance Object from which to obtain datastores.
datastore
Name of the datastore.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1959-L1992
|
[
"def get_mor_by_name(si, obj_type, obj_name):\n '''\n Get reference to an object of specified object type and name\n\n si\n ServiceInstance for the vSphere or ESXi server (see get_service_instance)\n\n obj_type\n Type of the object (vim.StoragePod, vim.Datastore, etc)\n\n obj_name\n Name of the object\n '''\n inventory = get_inventory(si)\n container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)\n for item in container.view:\n if item.name == obj_name:\n return item\n return None\n",
"def get_mor_by_moid(si, obj_type, obj_moid):\n '''\n Get reference to an object of specified object type and id\n\n si\n ServiceInstance for the vSphere or ESXi server (see get_service_instance)\n\n obj_type\n Type of the object (vim.StoragePod, vim.Datastore, etc)\n\n obj_moid\n ID of the object\n '''\n inventory = get_inventory(si)\n container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)\n for item in container.view:\n if item._moId == obj_moid:\n return item\n return None\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Load this utility module only when the pyVmomi dependency is available.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port (default 443)
    :param protocol: Connection protocol (default 'https')
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: The dictionary returned by ``salt.modules.cmdmod.run_all``,
             or ``False`` when the ``esxcli`` binary is not found.
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    # NOTE(review): the credentials are interpolated into a single shell
    # command line; a password containing a single quote breaks the quoting
    # and the full command (including the password) is handed to the shell.
    # output_loglevel='quiet' below at least keeps it out of the logs.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Raises ``CommandExecutionError`` when the mandatory parameters for the
    selected ``mechanism`` ('userpass' or 'sspi') are missing, and
    ``VMwareConnectionError`` when the connection itself fails.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Use six.text_type(exc) instead of exc.message: the 'message'
        # attribute was removed from exceptions in Python 3 (PEP 352) and
        # accessing it here would raise AttributeError.
        if 'unexpected keyword argument' in six.text_type(exc):
            log.error('Initial connect to the VMware endpoint failed with %s',
                      six.text_type(exc))
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)

        try:
            # First fallback: retry with certificate verification disabled
            # when the failure is a cert-verification error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            # Second fallback: explicit TLSv1 context without verification.
            if 'certificate verify failed' in six.text_type(exc):
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is closed when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Look up a VMware customization spec by name, for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Use a dedicated local instead of shadowing the name parameter.
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Return the managed object of type *obj_type* named *obj_name*, or
    ``None`` when no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    # Recursively view all objects of the requested type under the root.
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    matches = (entity for entity in container.view if entity.name == obj_name)
    return next(matches, None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the
    service instance object, reusing pyVim's cached session when possible.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``

    Raises ``VMwareApiError``/``VMwareRuntimeError`` when the liveness
    check on the (possibly cached) connection fails.
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # GetSi() returns pyVim's process-wide cached service instance, if any.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and authenticate from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a SOAP stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rules. We may need to turn off the hostname
    # checking and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    stub = service_instance._stub
    # Reuse the host and session cookie of the existing connection so the
    # new stub is authenticated without a new login.
    hostname = stub.host.split(':')[0]
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only in the trace log message.
        This field is optional.
    '''
    # NOTE(review): with the default of '<unnamed>' this branch only fires
    # when a caller explicitly passes a falsy name -- confirm whether the
    # intent was to default to mo_ref.name.
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a fresh ServiceInstance that shares the managed object's SOAP
    # stub (and therefore its authenticated session).
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises ``VMwareApiError`` on API faults (including insufficient
    permissions) and ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises ``VMwareApiError`` when the API type cannot be retrieved or is
    neither 'VirtualCenter' nor 'HostAgent', and ``VMwareRuntimeError`` on
    vmodl runtime faults.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' identifies a vCenter Server, 'HostAgent' an ESXi host.
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns the ``content.about`` information of the vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises ``VMwareApiError`` on API faults (including insufficient
    permissions) and ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no DVS with that name exists
    '''
    # Bail out early when the named switch is not known to the inventory.
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    dvs_view = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    for candidate in dvs_view.view:
        if candidate.name == dvs_name:
            return candidate
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for a Kerberos connection.

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    Returns the base64-encoded token. Raises ``ImportError`` when the
    gssapi library is unavailable and ``CommandExecutionError`` when no
    token could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            # On Python 3 the raw token must be coerced to bytes before
            # base64-encoding.
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): in_token is never reassigned, so this raises on the
        # first iteration that yields no token -- confirm whether the loop
        # was meant to feed server responses back into ctx.step().
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance
    is a HostAgent type (i.e. a direct ESXi connection); otherwise return an
    empty dictionary.

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only a HostAgent (ESXi) connection describes a single host whose
    # hardware can be mapped onto minion grains.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the (single) HostSystem of this ESXi host.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize divided down to MiB (assuming bytes -- per the
            # vSphere API).
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Collect per-vnic addressing for the interface grains.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host + '.' + domain, with the dot omitted when there
            # is no domain.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the reference to the container view.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the retrieved content (inventory) of a Service Instance object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises ``VMwareApiError`` on API faults (including insufficient
    permissions) and ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.

    Raises ``VMwareApiError`` on API faults (including insufficient
    permissions) and ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved; when no
    # property_list is supplied, fetch all properties.
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the container view we created above, if any
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose *property_name* value
    equals *property_value*, or whose stringified moref matches it; return
    ``None`` when nothing matches.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The property value for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Fetch every managed object of the requested type together with the
    # single property we will match on.
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # Stringified moref with surrounding quotes stripped, so callers
        # may also match on the moid.
        moid = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == moid:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Return a list of dicts, one per retrieved managed object, mapping each
    requested property name to its value plus an ``object`` key holding the
    managed object reference itself.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more
        filtered managed object reference results.

    container_ref
        An optional managed object to search under (Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem). If not specified,
        the inventory rootFolder is searched.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''
    call_args = [service_instance, object_type]
    call_kwargs = {'property_list': property_list,
                   'container_ref': container_ref,
                   'traversal_spec': traversal_spec,
                   'local_properties': local_properties}
    try:
        content = get_content(*call_args, **call_kwargs)
    except BadStatusLine:
        # Stale HTTP connection; a single retry is enough.
        content = get_content(*call_args, **call_kwargs)
    except IOError as err:
        if err.errno != errno.EPIPE:
            raise err
        # Broken pipe: retry once as well.
        content = get_content(*call_args, **call_kwargs)

    results = []
    for obj in content:
        entry = {prop.name: prop.val for prop in obj.propSet}
        entry['object'] = obj.obj
        results.append(entry)
    log.trace('Retrieved %s objects', len(results))
    return results
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Return specific properties of a managed object, retrieved optimally.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises VMwareApiError if no properties could be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # Best-effort name lookup; the name is only used in log/error text.
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace("Retrieving properties '%s' of %s '%s'",
              properties, type(mo_ref).__name__, mo_name)
    prop_entries = get_mors_with_properties(service_instance,
                                            type(mo_ref),
                                            container_ref=mo_ref,
                                            property_list=properties,
                                            local_properties=True)
    if not prop_entries:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return prop_entries[0]
def get_managed_object_name(mo_ref):
    '''
    Return the ``name`` property of a managed object, or None if the object
    has no name.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device of the requested type.

    adapter_type
        The adapter type name ('vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or
        'e1000e') for which to instantiate a device.

    Raises ValueError if the adapter type name is unknown.
    '''
    # Dispatch table keeps the name -> device class mapping in one place.
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type not in adapter_classes:
        raise ValueError('An unknown network adapter object type name.')
    return adapter_classes[adapter_type]()
def get_network_adapter_object_type(adapter_object):
    '''
    Return the type name of a network adapter device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ValueError if the object matches no known adapter class.
    '''
    # Order matters: the more specific adapter classes are tested before
    # the more general ones, mirroring the original check sequence.
    ordered_checks = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for adapter_cls, type_name in ordered_checks:
        if isinstance(adapter_object, adapter_cls):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Return the distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        When True, return every DVS in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        "Retrieving DVSs in datacenter '%s', dvs_names='%s', get_all_dvss=%s",
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # DVSs live under the datacenter's network folder; step into it and
    # then over the folder's children.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Return the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises VMwareObjectRetrievalError if the folder cannot be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace("Retrieving network folder in datacenter '%s'", dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Step from the datacenter straight into its networkFolder property.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter.
    Returns the result of the completed create task (the reference to the
    newly created distributed virtual switch).

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the
        name is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
    # The name in the config spec always wins over whatever the caller set.
    dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Return the task result so the function matches its documented
    # contract (previously the result was silently discarded).
    return wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Apply an updated config spec to a distributed virtual switch and wait
    for the reconfiguration task to finish.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace("Updating dvs '%s'", dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enable or disable network I/O control (NIOC) on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace("Setting network resource management enable to %s on "
              "dvs '%s'", enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Return distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        When True, return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace("Retrieving portgroup in %s '%s', portgroups_names='%s', "
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Portgroups hang off the datacenter's network folder children.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # Parent is a distributed virtual switch; walk its portgroup list.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Return the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference.

    Raises VMwareObjectRetrievalError if no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace("Retrieving uplink portgroup of dvs '%s'", dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup carries the SYSTEM/DVS.UPLINKPG tag.
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Create a distributed virtual portgroup on a distributed virtual switch
    (dvs) and wait for the create task to finish.

    dvs_ref
        The dvs reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec).
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace("Adding portgroup %s to dvs '%s'", spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to finish.

    portgroup_ref
        The portgroup reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec).
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log message typo ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Destroy a distributed virtual portgroup and wait for the destroy task
    to finish.

    portgroup_ref
        The portgroup reference.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Return networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The names of the standard switch networks. Default is None.

    get_all_networks
        When True, return all networks in the parent. Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace("Retrieving network from %s '%s', network_names='%s', "
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks live under the datacenter's network folder children.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Regardless of the properties requested, only each object's name is
    # returned (preserved behavior of the original loop).
    return [entry['name'] for entry in
            get_mors_with_properties(service_instance, vim_object,
                                     properties)]
def get_license_manager(service_instance):
    '''
    Return the license manager.

    service_instance
        The Service Instance Object from which to obtain the license
        manager.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Return the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises VMwareObjectRetrievalError if the manager is not available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Return the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not
        provided it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Add a license and return the resulting license object.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not
        provided it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label shown by the vSphere client.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug("Adding license '%s'", description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        NOTE(review): despite the documented default of None, a missing
        entity_name raises ArgumentValueError below - it is effectively
        mandatory.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    # entity_name is required for logging and for the vCenter name check.
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # No entity ref: the entity is the vCenter itself, identified by
        # its instance UUID rather than a managed object id.
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid) query is expected to yield exactly one assignment.
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        # Guard against being connected to a different vCenter than the
        # one the caller named.
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')

    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license object.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # No entity ref: assign to the vCenter itself (by instance UUID).
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before re-raising, consistent with the other handlers in
            # this module (previously missing here).
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Return a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Return datacenters in a vCenter, optionally filtered by name.

    service_instance
        The Service Instance Object from which to obtain datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Return a vim.Datacenter managed object by name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name.

    Raises VMwareObjectRetrievalError if the datacenter is not found.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Create a datacenter under the inventory root folder and return it.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object.

    datacenter_name
        The datacenter name.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace("Creating datacenter '%s'", datacenter_name)
    try:
        datacenter = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return datacenter
def get_cluster(dc_ref, cluster):
    '''
    Return a cluster in a datacenter by name.

    dc_ref
        The datacenter reference.

    cluster
        The name of the cluster to be retrieved.

    Raises VMwareObjectRetrievalError if the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace("Retrieving cluster '%s' from datacenter '%s'",
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's host folder children.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    matches = []
    for entry in get_mors_with_properties(si,
                                          vim.ClusterComputeResource,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if entry['name'] == cluster:
            matches.append(entry['object'])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Create a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace("Creating cluster '%s' in datacenter '%s'",
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Reconfigure a cluster and wait for the reconfiguration task to finish.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace("Updating cluster '%s'", cluster_name)
    try:
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Return a list of cluster names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Return a list of datastore cluster names associated with a given
    service instance.

    service_instance
        The Service Instance Object from which to obtain datastore
        clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Return a list of datastore names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Return a mapping of datastore name to basic datastore information:
    name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {name: list_datastore_full(service_instance, name)
            for name in list_objects(service_instance, vim.Datastore)}
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get a reference to the first object of the specified type with the
    specified name, or None if no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Destroy the container view so it doesn't leak on the server side.
        container.Destroy()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get a reference to the object of the specified type with the specified
    moid, or None if no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Destroy the container view so it doesn't leak on the server side.
        container.Destroy()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files matching a browser specification from the given
    datastores.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search.

    datastores
        Names of the datastores to search.

    container_object
        The base object for searches.

    browser_spec
        BrowserSpec object which defines the search criteria.

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    found = []
    for datastore in get_datastores(service_instance, container_object,
                                    datastore_names=datastores):
        try:
            task = datastore.browser.SearchDatastore_Task(
                datastorePath='[{}] {}'.format(datastore.name, directory),
                searchSpec=browser_spec)
        except vim.fault.NoPermission as err:
            log.exception(err)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(err.privilegeId))
        except vim.fault.VimFault as err:
            log.exception(err)
            raise salt.exceptions.VMwareApiError(err.msg)
        except vmodl.RuntimeFault as err:
            log.exception(err)
            raise salt.exceptions.VMwareRuntimeError(err.msg)
        try:
            found.append(salt.utils.vmware.wait_for_task(
                task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore that lacks the directory contributes nothing.
            pass
    return found
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.
        The list passed in by the caller is never mutated.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all datastores visible to the
        reference. Default value is False.

    Raises ArgumentValueError for unsupported reference types.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Rebind to a new list instead of calling extend() so the caller's
        # datastore_names list is not mutated as a side effect of this call
        if datastore_names:
            datastore_names = list(datastore_names) + disk_datastores
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Pick a traversal spec matching the reference type; the default
    # 'Traverse All' spec doesn't reach the datastore property everywhere
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    # NoPermission must be handled before the more generic VimFault
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Return the vim.HostStorageSystem of an ESXi host.

    service_instance
        The Service Instance Object used to query the host.

    host_ref
        The vim.HostSystem whose storage system is retrieved.

    hostname
        Name of the host; looked up from host_ref when not provided.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host object to its configManager.storageSystem
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.HostSystem,
        path='configManager.storageSystem',
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return results[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition informations for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The vim.HostStorageSystem to query.

    device_path
        Path of the device whose partition info is retrieved.
    '''
    try:
        # The API takes a list of paths; only one is queried here
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem used to compute the partition layout.

    device_path
        Path of the device on which the partition is added.

    partition_info
        The current vim.HostDiskPartitionInfo of the device.

    Raises VMwareObjectNotFoundError if the disk has no free partition and
    VMwareNotFoundError if the new partition can't be identified in the
    computed layout.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use a %s placeholder - logging's lazy args are %-style, the previous
    # '{0}' placeholder was never substituted
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved from host_ref when not
        provided. This argument is optional.

    Returns the vim.Datastore reference of the newly created datastore.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a spec that turns the remaining free space into a vmfs partition
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Return the vim.HostDatastoreSystem of an ESXi host.

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host object to its configManager.datastoreSystem
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.HostSystem,
        path='configManager.datastoreSystem',
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return results[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The datastore must have at least one attached host,
    through which the removal is performed.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # The removal is issued through the first attached host
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Return a list of vim.HostSystem objects representing the ESXi hosts in a
    vcenter, filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    properties = ['name']
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # 'parent' is needed below to verify cluster membership; it only
            # makes sense when a datacenter was specified
            properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])

    selected = []
    for host in hosts:
        if cluster_name:
            # Only keep hosts that are members of the requested cluster
            parent = host['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host['name'] in host_names:
            selected.append(host['object'])
    return selected
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises VMwareObjectRetrievalError when the device/multipath/lun info
    can't be retrieved from the host.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    Returns an empty list when the host reports no scsi luns.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Return a map of all vim.ScsiLun objects on an ESXi host keyed by their
    scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # First map: scsi address -> lun key
    scsi_addr_to_key = _get_scsi_address_to_lun_key_map(
        si, host_ref, storage_system, hostname)
    # Second map: lun key -> vim.ScsiLun object
    key_to_lun = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        key_to_lun[lun.key] = lun
    # Compose the two maps: scsi address -> vim.ScsiLun object
    return {scsi_addr: key_to_lun[lun_key]
            for scsi_addr, lun_key in six.iteritems(scsi_addr_to_key)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Without any filter and without get_all_disks nothing can match
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError when the disk or the host's devices
    can't be found.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Select the scsi disk matching the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Select the scsi disk matching the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # Without a cache disk filter and without get_all_disk_groups nothing
        # can match
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (single) cache disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Check that the cache and capacity disks of a disk group are the expected
    ones; raises ArgumentValueError when the check fails, returns True when
    it succeeds.
    '''
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    # Compare the sorted capacity disk id lists so ordering doesn't matter
    actual_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_ids = sorted(capacity_disk_ids)
    if actual_ids != expected_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_ids, expected_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Retrieve the cache manager's info via a traversal from the host
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        # A cache manager was supplied; query its properties directly
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method

    Returns True once the configuration task has completed successfully.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the configuration task finishes (raises on task failure)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean

    return
        Resourcepool managed object reference

    Raises VMwareObjectRetrievalError when no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search from the datacenter when given, otherwise from the root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = [pool['object'] for pool in resource_pools
                      if get_all_resource_pools or
                      pool['name'] in resource_pool_names]
    if not selected_pools:
        # Report the requested names (not the empty result list) so the
        # error message actually shows what was searched for
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))

    return selected_pools
def list_resourcepools(service_instance):
    '''
    Return the list of ResourcePool managed objects known to a service instance.
    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    pool_refs = list_objects(service_instance, vim.ResourcePool)
    return pool_refs
def list_networks(service_instance):
    '''
    Return the list of Network managed objects known to a service instance.
    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    network_refs = list_objects(service_instance, vim.Network)
    return network_refs
def list_vms(service_instance):
    '''
    Return the list of VirtualMachine managed objects known to a service instance.
    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vm_refs = list_objects(service_instance, vim.VirtualMachine)
    return vm_refs
def list_folders(service_instance):
    '''
    Return the list of Folder managed objects known to a service instance.
    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    folder_refs = list_objects(service_instance, vim.Folder)
    return folder_refs
def list_dvs(service_instance):
    '''
    Return the list of DistributedVirtualSwitch managed objects known to a
    service instance.
    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    dvs_refs = list_objects(service_instance, vim.DistributedVirtualSwitch)
    return dvs_refs
def list_vapps(service_instance):
    '''
    Return the list of VirtualApp managed objects known to a service instance.
    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vapp_refs = list_objects(service_instance, vim.VirtualApp)
    return vapp_refs
def list_portgroups(service_instance):
    '''
    Return the list of DistributedVirtualPortgroup managed objects known to a
    service instance.
    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    portgroup_refs = list_objects(service_instance,
                                  vim.dvs.DistributedVirtualPortgroup)
    return portgroup_refs
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.
    Polls ``task.info`` roughly once a second until the task leaves the
    ``running``/``queued`` states, translating any pyVmomi fault into the
    corresponding ``salt.exceptions`` type along the way.  On success the
    task result is returned; on failure the task's stored fault is re-raised
    and translated.
    task
        The task to wait for.
    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.
    task_type
        The type of task being performed. Useful information for debugging purposes.
    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.
    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Initial read of the task state; wrap so callers never see raw
    # vim/vmodl faults.
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Emit a progress message only every ``sleep_seconds`` iterations;
        # note the actual sleep below is always ~1 second regardless.
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep up to the next whole-second boundary relative to start_time
        # so ``time_counter`` tracks elapsed wall-clock seconds.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state
        # Re-raise the fault recorded on the task and translate it into the
        # matching salt exception type.
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first detailed fault message when one is available.
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.
    service_instance
        Service instance object to access vCenter
    name
        Name of the virtual machine.
    datacenter
        Datacenter name
    vm_properties
        List of vm properties.
    traversal_spec
        Traversal Spec object(s) for searching.
    parent_ref
        Container Reference object for searching under a given object.
    raises
        salt.exceptions.VMwareObjectRetrievalError when no VM matches,
        salt.exceptions.VMwareMultipleObjectsError when more than one matches.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set covering hardware, storage, guest and runtime info.
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Bug fix: adjacent string literals previously produced
        # "with thesame name" (missing space), and the single-element
        # ' '.join was a no-op.
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the '
            'same name, please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object
    Resolution order: parent folder of ``base_vm_name`` if given, then the
    folder named in ``placement['folder']``, then the datacenter's vmFolder.
    service_instance
        Service instance object
    datacenter
        Name of the datacenter
    placement
        Placement dictionary (may contain a ``folder`` key)
    base_vm_name
        Existing virtual machine name (for cloning)
    raises
        salt.exceptions.VMwareObjectRetrievalError when no folder can be
        determined from the given arguments.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'The virtual machine parent object is not defined')
    elif placement and 'folder' in placement:
        # ``placement and`` guards against placement being None, which
        # previously raised TypeError on the membership test.
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(
                'Multiple instances are available of the '
                'specified folder {0}'.format(placement['folder']))
        folder_object = folder_objects[0]
    elif datacenter:
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Bug fix: previously this fell through and the final return
        # raised UnboundLocalError instead of a meaningful error.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unable to retrieve folder: no base VM, placement folder or '
            'datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.
    datacenter
        Name of the datacenter
    placement
        Dictionary with the placement info, cluster, host resource pool name
    return
        Resource pool, cluster and host object if any applies
    raises
        salt.exceptions.VMwareObjectRetrievalError when the referenced
        host/cluster/resource pool cannot be found or placement is empty.
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    # Guard against the default None; an empty dict falls through to the
    # 'Placement is not defined.' error instead of raising TypeError.
    placement = placement or {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'The specified host '
                '{0} cannot be found.'.format(placement['host']))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose resourcePool directly; clustered hosts
            # require walking up to the cluster's resource pool.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Bug fix: this message previously read placement['host'] (a
            # KeyError in this branch) and talked about a "host".
            raise salt.exceptions.VMwareMultipleObjectsError(
                'Multiple instances are available of the '
                'specified resource pool {}.'.format(placement['resourcepool']))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'The resource pool\'s parent object is not defined')
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'The cluster\'s resource pool object is not defined')
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError('Placement is not defined.')
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a dict with an
    integer ``size`` in KiB and the literal ``unit`` 'KB'.
    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB
    size
        Number which represents the size
    raises
        salt.exceptions.ArgumentValueError when the unit is not GB/MB/KB.
    '''
    # Multipliers to KiB; vCenter expects integer KiB values.
    factors = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    try:
        target_size = int(size * factors[unit.lower()])
    except KeyError:
        # Bug fix: the previous message ('The unit is not specified') was
        # misleading -- the unit was specified, just unsupported.
        raise salt.exceptions.ArgumentValueError(
            'Invalid unit specified: \'{0}\''.format(unit))
    return {'size': target_size, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.
    Blocks until the power task finishes and returns the same
    ``virtual_machine`` reference.
    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine
    action
        Operation option to power on/off the machine ('on' or 'off')
    '''
    if action == 'on':
        try:
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    elif action == 'off':
        try:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        # Wait for the power task; a missing-file fault here usually means a
        # VM disk/config file disappeared, so surface it as a power-on error.
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec
    vm_name
        Virtual machine name to be created
    vm_config_spec
        Virtual Machine Config Spec object
    folder_object
        vm Folder managed object reference
    resourcepool_object
        Resource pool object where the machine will be created
    host_object
        Host object where the machine will ne placed (optional)
    return
        Virtual Machine managed object reference
    '''
    try:
        # Only pass the host argument when a concrete HostSystem was given;
        # otherwise let DRS/the resource pool choose the placement.
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll every 10 s and log progress at 'info' since creation can be slow.
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference
    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object
    name
        Name of the virtual machine
    vmx_path:
        Full path to the vmx file, datastore name should be included
    resourcepool
        Placement resource pool of the virtual machine, vim.ResourcePool object
    host
        Placement host of the virtual machine, vim.HostSystem object
    '''
    try:
        # Register under the datacenter's vmFolder; the optional host pins
        # the VM to a specific ESXi host.
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # A missing file at this point means the vmx path was wrong.
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object
    Blocks until the reconfigure task completes and returns the task result.
    vm_ref
        Virtual machine managed object reference
    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine
    Permanently deletes the VM and its files; blocks until the destroy task
    completes.
    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory.
    Unlike ``delete_vm`` this does not delete the VM's files on disk.
    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Bug fix: docstring and log previously said 'Destroying', which is
    # what delete_vm does; UnregisterVM only removes the inventory entry.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # log.exception added for consistency with every other fault
        # handler in this module.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_mor_by_name
|
python
|
def get_mor_by_name(si, obj_type, obj_name):
'''
Get reference to an object of specified object type and name
si
ServiceInstance for the vSphere or ESXi server (see get_service_instance)
obj_type
Type of the object (vim.StoragePod, vim.Datastore, etc)
obj_name
Name of the object
'''
inventory = get_inventory(si)
container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
for item in container.view:
if item.name == obj_name:
return item
return None
|
Get reference to an object of specified object type and name
si
ServiceInstance for the vSphere or ESXi server (see get_service_instance)
obj_type
Type of the object (vim.StoragePod, vim.Datastore, etc)
obj_name
Name of the object
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L1995-L2013
|
[
"def get_inventory(service_instance):\n '''\n Return the inventory of a Service Instance Object.\n\n service_instance\n The Service Instance Object for which to obtain inventory.\n '''\n return service_instance.RetrieveContent()\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.
    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    # NOTE(review): the password is interpolated into the command line and is
    # therefore visible in process listings on the minion; output_loglevel
    # below at least keeps it out of the logs. Consider using a credstore.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connect via vCenter (-s) and target a specific managed ESXi (-h).
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.
    Supports the ``userpass`` and ``sspi`` (Kerberos/GSSAPI) mechanisms and
    retries the connection with relaxed SSL verification when certificate
    verification fails.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Bug fix: Python 3 exceptions have no ``.message`` attribute, so
        # the previous ``exc.message`` access raised AttributeError here.
        if 'unexpected keyword argument' in six.text_type(exc):
            log.error('Initial connect to the VMware endpoint failed with %s',
                      six.text_type(exc))
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # Retry without certificate verification when the failure is an
            # SSL verification error; otherwise surface a connection error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: explicit TLSv1 context with verification off.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is closed when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of customizing a clone
    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)
    customization_spec_name
        Name of the customization spec
    '''
    # Clarity fix: the original rebound the ``customization_spec_name``
    # parameter to hold the returned spec object, shadowing the input name.
    customization_spec = si.content.customizationSpecManager.GetCustomizationSpec(
        name=customization_spec_name)
    return customization_spec
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name
    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)
    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)
    obj_name
        Name of the object
    return
        The first matching managed object reference, or None if not found.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Leak fix: ContainerView objects are server-side sessions and must
        # be destroyed explicitly (DestroyView), or they accumulate on the
        # vCenter/ESXi side.
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.
    Reuses a cached pyVim session when one exists for the same host:port;
    otherwise (or when the session has gone stale) establishes a new one.
    host
        The location of the vCenter server or ESX/ESXi host.
    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``
    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``
    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.
    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.
    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.
    principal
        Kerberos service principal. Required if mechanism is ``sspi``
    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and authenticate again from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.
    Reuses the existing session cookie so the new stub shares the
    authenticated session of ``service_instance``.
    service_instance
        The Service Instance.
    path
        Path of the new stub.
    ns
        Namespace of the new stub.
        Default value is None
    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    # stub.host is 'hostname:port'; stub.cookie is a Set-Cookie style string
    # whose quoted value is the session id.
    hostname = stub.host.split(':')[0]
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.
    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).
    name
        Name of managed object. This field is optional.
    '''
    # NOTE(review): the default '<unnamed>' is truthy, so this fallback only
    # fires when an explicitly falsy name is passed -- confirm intent.
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a ServiceInstance that shares the managed object's SOAP stub,
    # i.e. the same authenticated session.
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    # Translate pyVmomi faults into salt exceptions so callers only need to
    # handle the salt exception hierarchy.
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' identifies a vCenter Server; 'HostAgent' identifies a
    # standalone ESXi host. Anything else is unexpected and raised as an error.
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        # 'about' carries product name, version, build and apiType details.
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None if no DVS with that name exists
    '''
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(
            inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        try:
            for item in container.view:
                if item.name == dvs_name:
                    return item
        finally:
            # Destroy the server-side container view; the original returned
            # from inside the loop without cleanup, leaking the view on the
            # vCenter until the session ended.
            container.Destroy()
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Raises ImportError when the optional ``gssapi`` library is unavailable,
    and CommandExecutionError when the handshake does not yield a token.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    # Kerberos service principal format: principal/host@DOMAIN
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    # Step the GSSAPI context until it is established; the first token the
    # server hands back is returned base64-encoded.
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # No response token received and none was sent: the server is silent.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only standalone ESXi hosts (apiType 'HostAgent') expose the per-host
    # hardware details collected below; vCenter connections return {}.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the (single) HostSystem of this ESXi host.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grain is reported in MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # VMkernel NICs: collect per-device IPv4/IPv6 addresses and MACs.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NICs only contribute their MAC addresses.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # NOTE(review): the container view is only dereferenced here, not
        # Destroy()ed -- the server-side view lives until the session ends.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    # The inventory is the service content retrieved from the instance.
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    try:
        log.trace('Retrieving root folder')
        # The root folder is the top of the inventory tree and is the default
        # starting point for property-collector traversals in this module.
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    # Tracks whether a container view was created here, so it can be destroyed
    # before returning.
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match; the object's stringified moid is
        also accepted as a match.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Fetch every managed object reference exposing the requested property.
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        candidate_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value \
                or property_value == candidate_id:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    # Retry once on transient connection hiccups (stale HTTP response or a
    # broken pipe); any other IOError is re-raised.
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)

    # Flatten each result's propSet into a plain dict, keeping the managed
    # object reference itself under the 'object' key.
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises VMwareApiError if no properties could be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First fetch the object's name purely for logging; fall back to a
    # placeholder if the 'name' property is not valid for this object type.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Return the name of a managed object, or None if the name wasn't found.

    mo_ref
        The managed object reference.
    '''
    retrieved_props = get_properties_of_managed_object(mo_ref, ['name'])
    return retrieved_props.get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device of the requested type.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or 'e1000e'.

    Raises ValueError for any other adapter type name.
    '''
    # Map each supported adapter-type name to its virtual device class.
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        return adapter_classes[adapter_type]()

    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name for a virtual device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ValueError when the object matches none of the known types.
    '''
    # Order matters: the more specific vmxnet2/vmxnet3 classes are tested
    # before the general vmxnet, and e1000e before e1000, mirroring the
    # original isinstance chain.
    type_by_class = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for device_class, type_name in type_by_class:
        if isinstance(adapter_object, device_class):
            return type_name

    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    properties = ['name']
    # Traverse datacenter -> networkFolder -> childEntity, where the DVSs
    # live; the intermediate folder itself is skipped.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Keep either every DVS (get_all_dvss) or only the requested names.
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualSwitch,
                                      container_ref=dc_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_dvss or (dvs_names and i['name'] in dvs_names)]
    return items
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises VMwareObjectRetrievalError if the folder couldn't be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Follow the datacenter's 'networkFolder' property directly.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete. Nothing is returned; note the newly
    created DVS reference is NOT returned to the caller.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the
        DVS name is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Build a minimal create spec if none (or an incomplete one) was passed;
    # the name in the config spec is always forced to dvs_name.
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the creation task as complete.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfigure task as complete.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (network I/O control) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        # Synchronous API call; no task is returned for this operation.
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises ArgumentValueError for any other parent type.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    if isinstance(parent_ref, vim.Datacenter):
        # Datacenter parent: traverse networkFolder -> childEntity, skipping
        # the intermediate folder object itself.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        # DVS parent: its 'portgroup' property lists the portgroups directly.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)

    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Keep either every portgroup (get_all_portgroups) or only requested names.
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference.

    Raises VMwareObjectRetrievalError if no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by the system-assigned
    # 'SYSTEM/DVS.UPLINKPG' tag on the portgroup.
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the creation task as complete.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the reconfigure
    task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfigure task as complete.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the destroy task as complete.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.

    Raises ArgumentValueError if the parent is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity, skipping the
    # intermediate folder object itself.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    # Keep either every network (get_all_networks) or only requested names.
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content (vim managed object type) for which to obtain
        information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    # Bug fixed: the docstring previously documented a nonexistent
    # 'object_type' parameter; the actual parameter is 'vim_object'.
    if properties is None:
        properties = ['name']

    # Only the 'name' of each retrieved object is exposed to callers.
    item_list = get_mors_with_properties(service_instance, vim_object, properties)
    return [item['name'] for item in item_list]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises VMwareObjectRetrievalError when no assignment manager is exposed.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
                service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # The property can legitimately be unset; surface that as an explicit
    # retrieval error instead of returning None to callers.
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label the vSphere client displays
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            # The vCenter is identified by its instance UUID instead of a moid
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # A vCenter (uuid lookup) should have exactly one license assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        # Guard against querying a different vCenter than the caller expected
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')

    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # vcenter: identified by its instance UUID rather than a moid
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before re-raising, consistent with the other API wrappers
            # in this module
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    # Thin wrapper; list_objects returns the object names
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    # Retrieve all datacenters with just their names, then filter client-side
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name
    '''
    # Narrow the vCenter's datacenters down to the one requested
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if matches:
        return matches[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    # Datacenters can only be created directly under the root folder
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's hostFolder; traverse into it and
    # then into its child entities
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        # Clusters are created under the datacenter's hostFolder
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing config instead of
        # replacing it
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfiguration completes (or raises)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    # Thin wrapper; list_objects returns the object names
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    # Datastore clusters are modeled as vim.StoragePod objects
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # Thin wrapper; list_objects returns the object names
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.
    The list contains basic information about the datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # Map each datastore name to its detail dictionary
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )

    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Sizes are reported in bytes by the API; convert to MiB
    # NOTE(review): under Python 3 true division yields floats here
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []

    for host in datastore_object.host:
        # host.key stringifies as 'vim.HostSystem:<moid>'; keep only the moid
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)

    return items
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object

    Returns the matching managed object reference, or None if no object with
    the given moid exists.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # doesn't accumulate on the vCenter/ESXi side for the session lifetime
        container.Destroy()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Search path is in '[datastore] directory' datastore-path format
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Best effort: a datastore without the directory is simply skipped
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            # Backing-disk filtering needs the host's storage system, so it
            # only makes sense for a host reference
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:

            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Merge the disk-derived names into the name filter
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores

    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []

    log.trace('datastore_names = %s', datastore_names)

    # Pick a traversal spec appropriate for the reference type; the default
    # 'Traverse All' spec doesn't reach datastores from every starting point
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object from which to obtain the storage system.

    host_ref
        The vim.HostSystem whose storage system is retrieved.

    hostname
        Name of the host, used in error messages. Retrieved if not provided.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse directly to the host's configManager.storageSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The vim.HostStorageSystem used to query the disk.

    device_path
        Path of the device whose partition info is retrieved.
    '''
    try:
        # The API accepts a list of device paths; we query a single one
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem used to compute the partition layout.

    device_path
        Path of the device on which the partition is added.

    partition_info
        The device's current vim.HostDiskPartitionInfo.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed: logging uses lazy %-style args, so the previous '{0}' placeholder
    # was never substituted
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved if not provided.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a partition occupying the remaining free space on the disk
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse directly to the host's configManager.datastoreSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes (unmounts) a datastore.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # The removal must be issued through the datastore system of one of the
    # hosts the datastore is attached to; the first one is used
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)

        if cluster_name:
            # Standalone hosts have a ComputeResource (not cluster) parent;
            # skip them, as well as hosts from other clusters
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue

        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue

        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    # A host with no scsi luns yields an empty list, not an error
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Builds a map of all vim.ScsiLun objects on an ESXi host, keyed by
    their scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref,
                                                                name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # scsi address -> lun key
    scsi_addr_to_lun_key = _get_scsi_address_to_lun_key_map(
        service_instance, host_ref, storage_system, hostname)
    # lun key -> lun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Join the two maps into scsi address -> lun object
    scsi_addr_to_lun = {}
    for scsi_addr, lun_key in six.iteritems(scsi_addr_to_lun_key):
        scsi_addr_to_lun[scsi_addr] = lun_by_key[lun_key]
    return scsi_addr_to_lun
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing the disks of an
    ESXi host, filtered by canonical name and/or scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default
        value is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default
        value is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # Nothing was requested
            return []
    service_instance = get_service_instance_from_managed_object(
        host_ref, name=hostname)
    storage_system = get_storage_system(service_instance, host_ref, hostname)
    matching_keys = []
    if scsi_addresses:
        # Translate the requested scsi addresses into lun keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
            service_instance, host_ref, storage_system, hostname)
        for scsi_addr, lun_key in six.iteritems(lun_key_by_scsi_addr):
            if scsi_addr in scsi_addresses:
                matching_keys.append(lun_key)
        log.trace('disk_keys based on scsi_addresses = %s', matching_keys)
    matching_disks = []
    for lun in get_all_luns(host_ref, storage_system):
        if not isinstance(lun, vim.HostScsiDisk):
            continue
        if (get_all_disks or
                # Filter by canonical name
                (disk_ids and (lun.canonicalName in disk_ids)) or
                # Filter by lun keys derived from the scsi addresses
                (lun.key in matching_keys)):
            matching_disks.append(lun)
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in matching_disks])
    return matching_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns the partition information of a disk on an ESXi host.

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Retrieved from the host if not
        provided. Default is None.

    Raises ``VMwareObjectRetrievalError`` when the host reports no devices
    or when no disk with the given canonical name exists on the host.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # Pull the full lun list once; the disk is located by canonical name
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Keep only the scsi disk matching the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk of an ESXi host by applying an empty
    partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.

    Raises ``VMwareObjectRetrievalError`` when the host's devices cannot be
    retrieved or the disk is not found, ``VMwareApiError`` on vim faults
    and ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    # Retrieve the host's lun list through the host's storage system
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the target disk by its canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing the disk
    groups of an ESXi host, filtered by the canonical names of their cache
    disks.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disk groups.

    cache_disk_ids
        The list of canonical names of the cache disks of the disk groups
        to be retrieved. The canonical name of the cache disk is enough to
        identify the disk group because it is guaranteed to have one and
        only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # Nothing was requested
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    if not vsan_storage_info.diskMapping:
        return []
    # Keep the mappings whose cache (ssd) disk was requested
    matching_groups = []
    for mapping in vsan_storage_info.diskMapping:
        if get_all_disk_groups or \
                mapping.ssd.canonicalName in cache_disk_ids:
            matching_groups.append(mapping)
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [g.ssd.canonicalName for g in matching_groups]
    )
    return matching_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Validates that a disk group contains exactly the expected cache disk and
    capacity disks; raises ``ArgumentValueError`` when the check fails.

    disk_group
        The disk group (vim.VsanHostDiskMapping) to validate.

    cache_disk_id
        The expected canonical name of the group's cache (ssd) disk.

    capacity_disk_ids
        The expected canonical names of the group's capacity (non-ssd)
        disks, in any order.
    '''
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    capacity_ids_found = [d.canonicalName for d in disk_group.nonSsd]
    # Order is irrelevant; compare the sorted id lists
    if sorted(capacity_ids_found) != sorted(capacity_disk_ids):
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(sorted(capacity_ids_found),
                      sorted(capacity_disk_ids)))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the host cache configuration info of an ESXi host if a host
    cache is configured, otherwise returns None.

    NOTE(review): the value returned is the first entry of
    ``cacheConfigurationInfo`` (a cache configuration object), not a
    vim.HostScsiDisk as a previous version of this docstring claimed.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Reach the cache config manager through the host's config manager
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host. Returns True on
    success.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache
        will be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method

    Raises ``VMwareObjectRetrievalError`` when the host has no cache
    configuration manager, ``VMwareApiError`` on vim faults and
    ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the server-side task completes
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Return the names of all ESXi hosts known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; when True all resource pools under the container are
        returned regardless of ``resource_pool_names``

    return
        List of resource pool managed object references

    Raises ``VMwareObjectRetrievalError`` when no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search under the datacenter when given, otherwise under the root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the requested names (previously this formatted the empty
        # result list, so the error always read 'names=[]')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Return all resource pools known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Return all networks known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Return all virtual machines known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Return all folders known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Return all distributed virtual switches known to the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain distributed
        virtual switches.
    '''
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Return all vApps known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Return all distributed virtual portgroups known to the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain distributed
        virtual portgroups.
    '''
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a vCenter/ESXi task to be completed and returns its result.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.

    Returns ``task.info.result`` on success. If the task ends in an error
    state, the task's error is re-raised and translated into the matching
    salt exception (VMwareApiError, VMwareFileNotFoundError,
    VMwareSystemError, VMwareRuntimeError).
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Reading task.info is a server round-trip and may itself fault
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll until the task leaves the 'running'/'queued' states
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only log every `sleep_seconds` iterations to limit log noise
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # NOTE(review): this sleeps to the next 1-second boundary from
        # start_time regardless of `sleep_seconds`; `sleep_seconds` only
        # throttles the log message above — confirm this is intended
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the task's own error so it can
        # be translated into the appropriate salt exception below
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                # Include the first detailed fault message when available
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises ``VMwareObjectRetrievalError`` when no machine with the given
    name is found and ``VMwareMultipleObjectsError`` when the name matches
    more than one machine.
    '''
    if datacenter and not parent_ref:
        # Narrow the search to the given datacenter
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # The previous message was built from two implicitly concatenated
        # string literals with a missing space ('...with thesame name...')
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the same name, '
            'please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns the vim.Folder object a virtual machine belongs in.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary; may contain a 'folder' key naming the target
        folder

    base_vm_name
        Existing virtual machine name (for cloning); when given, the parent
        folder of that machine is returned

    Raises ``VMwareObjectRetrievalError`` when no folder can be determined
    and ``VMwareMultipleObjectsError`` when a named folder is ambiguous.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif placement and 'folder' in placement:
        # Guarding on `placement` avoids a TypeError when it is None
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this fell through and raised an UnboundLocalError on
        # the return below; fail with a meaningful error instead
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The folder object could not be determined: no base vm, '
            'placement folder or datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied,
    we would like to use the strictest as possible.

    service_instance
        Service instance object to access the vCenter

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies

    Raises ``VMwareObjectRetrievalError`` when the placement cannot be
    resolved and ``VMwareMultipleObjectsError`` when it is ambiguous.
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if placement is None:
        # The membership tests below would raise TypeError on None; an
        # empty dict falls through to the 'Placement is not defined.' error
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose no resourcePool property; traverse to
            # the cluster's resource pool instead
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Previously this referenced placement['host'], which need not
            # exist in this branch and raised a KeyError instead of the
            # intended error
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KiB based on the unit.

    Returns a dict of the form ``{'size': <int>, 'unit': 'KB'}`` — the
    previous docstring incorrectly claimed a bare integer was returned.

    unit
        Unit of the size, one of 'GB', 'MB' or 'KB' (case insensitive).
        Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    Raises ``ArgumentValueError`` for an unknown unit.
    '''
    # Multipliers are powers of two, so float sizes convert exactly
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    try:
        multiplier = multipliers[unit.lower()]
    except KeyError:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    # vCenter needs a plain (long) integer value
    return {'size': int(size * multiplier), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine and waits for the operation to
    complete; returns the virtual machine object.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine; either 'on' or 'off'

    Raises ``ArgumentValueError`` for an unsupported action,
    ``VMwarePowerOnError`` when a required file is missing, and
    ``VMwareApiError``/``VMwareRuntimeError`` for translated vim/vmodl
    faults.
    '''
    if action == 'on':
        try:
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    elif action == 'off':
        try:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # Translate the missing-file failure into a power-operation error
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference

    Raises ``VMwareApiError``/``VMwareRuntimeError`` for translated
    vim/vmodl faults.
    '''
    try:
        # Pin the vm to a specific host only when a real host object is given
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file; on
    success it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object
        (optional)

    Raises ``VMwareVmRegisterError`` when the vmx file cannot be found, and
    ``VMwareApiError``/``VMwareRuntimeError`` for translated vim/vmodl
    faults.
    '''
    try:
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # Translate a missing vmx file into a registration-specific error
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object and
    waits for the reconfiguration task to finish.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update

    Returns the result of the ReconfigVM task.

    Raises ``VMwareApiError``/``VMwareRuntimeError`` for translated
    vim/vmodl faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine (removing it from the inventory and
    deleting its files) and waits for the destroy task to finish.

    vm_ref
        Managed object reference of a virtual machine object

    Raises ``VMwareApiError``/``VMwareRuntimeError`` for translated
    vim/vmodl faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters a virtual machine, removing it from the inventory without
    deleting its files (unlike ``delete_vm``).

    vm_ref
        Managed object reference of a virtual machine object

    Raises ``VMwareApiError``/``VMwareRuntimeError`` for translated
    vim/vmodl faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    # The previous trace message said 'Destroying', which is wrong:
    # UnregisterVM only removes the vm from the inventory
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, consistently with the sibling vm helpers
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_mor_by_moid
|
python
|
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Return the managed object reference of the given type whose moId matches
    ``obj_moid``, or ``None`` when no inventory object matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    view = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # First match wins; None when nothing in the view has that moId.
    return next((obj for obj in view.view if obj._moId == obj_moid), None)
|
Get reference to an object of specified object type and id
si
ServiceInstance for the vSphere or ESXi server (see get_service_instance)
obj_type
Type of the object (vim.StoragePod, vim.Datastore, etc)
obj_moid
ID of the object
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2016-L2034
|
[
"def get_inventory(service_instance):\n '''\n Return the inventory of a Service Instance Object.\n\n service_instance\n The Service Instance Object for which to obtain inventory.\n '''\n return service_instance.RetrieveContent()\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary with the fields returned by
        ``salt.modules.cmdmod.run_all`` (retcode, stdout, stderr, ...),
        or ``False`` when the ``esxcli`` binary is not on PATH.
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    # NOTE(review): host/user/pwd are interpolated into a single shell command
    # string below.  A password containing a single quote breaks the quoting
    # and could allow shell injection -- consider passing an argument list to
    # cmdmod instead.  Flagged only; behavior intentionally unchanged here.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    # 'quiet' keeps the full command (which contains the password) out of the
    # command-execution logs.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username / password
        Credentials; both mandatory when ``mechanism`` is ``userpass``.

    protocol / port
        Connection protocol and port, passed through to ``SmartConnect``.

    mechanism
        Login mechanism: ``userpass`` or ``sspi``.

    principal / domain
        Kerberos service principal and user domain; both mandatory when
        ``mechanism`` is ``sspi``.

    Raises ``CommandExecutionError`` when mandatory parameters are missing
    or the mechanism is unsupported, and ``VMwareConnectionError`` when the
    connection cannot be established.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # ``TypeError`` has no ``message`` attribute on Python 3, so the old
        # ``exc.message`` check raised AttributeError there; inspect the
        # stringified exception instead, which works on both versions.
        if 'unexpected keyword argument' in six.text_type(exc):
            log.error('Initial connect to the VMware endpoint failed with %s',
                      six.text_type(exc))
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
        # Always propagate the TypeError; previously falling through here
        # left ``service_instance`` unbound and caused an UnboundLocalError
        # at the atexit registration below.
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                    '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
                    '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # Retry once with certificate verification disabled.
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: retry with an explicit TLSv1 context and no
                # certificate verification.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is closed when the interpreter exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Look up a VMware customization spec by name, for use when customizing a
    clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Avoid reusing the parameter name for the result, unlike the original
    # implementation; the returned object is the spec itself.
    return si.content.customizationSpecManager.GetCustomizationSpec(
        name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Return the managed object reference of the given type whose name matches
    ``obj_name``, or ``None`` when nothing in the inventory matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    view = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((obj for obj in view.view if obj.name == obj_name), None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    A cached service instance (pyVim's module-level ``GetSi()``) is reused
    when it points at the same host; otherwise a fresh connection is made
    via ``_get_service_instance`` and verified with a ``CurrentTime()`` call.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # Try to reuse pyVim's cached (process-wide) service instance first.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
                (hasattr(stub, 'host') and
                 stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and connect again from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    # Reuse host and session cookie from the existing connection so the new
    # stub is authenticated as the same session.
    hostname = stub.host.split(':')[0]
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only for logging. This field is optional.
    '''
    if not name:
        # NOTE(review): only an explicitly falsy ``name`` (None, '') triggers
        # this lookup; the default '<unnamed>' is truthy and is kept as-is --
        # confirm that is the intended behavior.
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a fresh ServiceInstance that reuses the managed object's SOAP
    # stub, i.e. the same authenticated connection.
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises:
        salt.exceptions.VMwareApiError: on permission or other API faults.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Return ``True`` when ``service_instance`` is connected to a vCenter
    Server and ``False`` when it is connected directly to an ESXi host,
    based on the reported API type.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # Map the known apiType values; anything else is an error.
    known_types = {'VirtualCenter': True, 'HostAgent': False}
    if api_type in known_types:
        return known_types[api_type]
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host
    (the ``AboutInfo`` object exposed as ``content.about``).

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises:
        salt.exceptions.VMwareApiError: on permission or other API faults.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no switch has that name
    '''
    # Bail out early when the name is not among the known switches.
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    view = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    return next((dvs for dvs in view.view if dvs.name == dvs_name), None)
def _get_pnics(host_reference):
    '''
    Return the host's physical NICs (``config.network.pnic``).
    '''
    network_config = host_reference.config.network
    return network_config.pnic
def _get_vnics(host_reference):
    '''
    Return the host's virtual NICs (``config.network.vnic``).
    '''
    network_config = host_reference.config.network
    return network_config.vnic
def _get_vnic_manager(host_reference):
    '''
    Return the host's virtual NIC manager
    (``configManager.virtualNicManager``).
    '''
    config_manager = host_reference.configManager
    return config_manager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None when no portgroup has that name
    '''
    return next((pg for pg in dvs.portgroup if pg.name == portgroup_name),
                None)
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    NOTE(review): this is currently byte-for-byte the same lookup as
    ``_get_dvs_portgroup`` -- it matches by name over all portgroups and
    does not restrict itself to uplink portgroups; confirm that is intended.

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None when no portgroup has that name
    '''
    return next((pg for pg in dvs.portgroup if pg.name == portgroup_name),
                None)
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection, base64-encoded.

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Raises ImportError when the gssapi library is unavailable and
    CommandExecutionError when no token can be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    # Principal takes the form service/host@REALM.
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    # NOTE(review): ``in_token`` is never updated inside the loop, so this
    # performs at most one step of the handshake: either the first step
    # yields a token (returned below) or an error is raised -- confirm a
    # multi-step exchange is not required here.
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type
    (i.e. a direct ESXi connection); returns an empty dict otherwise.

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        # A HostAgent connection has exactly the local host in its inventory;
        # all grains below are read from view.view[0].
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grains report MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # One entry per VMkernel NIC (vmk*).
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host[.domain]; the dot is omitted when domain is empty.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the reference to the container view when done.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the retrieved content (inventory) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises:
        salt.exceptions.VMwareApiError: on permission or other API faults.
        salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view (only if we created it above)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference whose ``property_name``
    property (or stringified object id) equals ``property_value``, or
    ``None`` when there is no match.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # The stringified mor looks like 'vim.Type:moid'; strip the quotes so
        # callers may also match on the object id.
        candidate_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if property_value in (candidate[property_name], candidate_id):
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    Each list entry is a dict mapping property names to values, plus an
    ``'object'`` key holding the managed object reference itself.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # Dropped HTTP connection: retry the collection once.
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        # Only a broken pipe is retried; any other IOError propagates.
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        # Keep the mor itself alongside its retrieved properties.
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Retrieve the requested local properties of a managed object and return
    them as a dict.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises VMwareApiError when no properties could be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # Fetch the object's name first -- used only for logging and the error
    # message below.
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    entries = get_mors_with_properties(service_instance,
                                       type(mo_ref),
                                       container_ref=mo_ref,
                                       property_list=properties,
                                       local_properties=True)
    if not entries:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return entries[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object, or ``None`` when the ``name``
    property could not be retrieved.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new network adapter device object for the given adapter type
    name.

    adapter_type
        One of ``vmxnet``, ``vmxnet2``, ``vmxnet3``, ``e1000``, ``e1000e``.

    Raises ValueError for any other name.
    '''
    factories = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in factories:
        return factories[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Return the adapter type name for a network adapter device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ValueError when the object is none of the known adapter types.
    '''
    # Order matters: the more specific Vmxnet2/Vmxnet3 and E1000e classes are
    # checked before their respective base classes, as in the original.
    type_table = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for device_cls, type_name in type_table:
        if isinstance(adapter_object, device_cls):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
        Takes precedence over ``dvs_names``.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    properties = ['name']
    # DVSs live under the datacenter's network folder: traverse
    # networkFolder -> childEntity to reach them.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Keep only the requested DVSs (or all of them when get_all_dvss is set)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualSwitch,
                                      container_ref=dc_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_dvss or (dvs_names and i['name'] in dvs_names)]
    return items
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises ``VMwareObjectRetrievalError`` if the folder wasn't retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Traverse directly to the datacenter's 'networkFolder' property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits
    for the create task to complete.

    Note: this function does not return the new DVS reference; retrieve it
    afterwards with ``get_dvss`` if needed.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Build a default spec if none supplied; name the config spec only when
    # we created it ourselves (a caller-provided configSpec is kept as-is).
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the task finished (raises on task error)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and waits for
    the reconfigure task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether network I/O control (NIOC) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
        Takes precedence over ``portgroup_names``.

    Raises ``ArgumentValueError`` on an unsupported parent type.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    # Pick the traversal spec based on the parent type: datacenters reach
    # portgroups through networkFolder/childEntity, DVSs directly through
    # their 'portgroup' property.
    if isinstance(parent_ref, vim.Datacenter):
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference.

    Raises ``VMwareObjectRetrievalError`` if no uplink portgroup was found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by its 'SYSTEM/DVS.UPLINKPG' tag
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the create task to complete.

    dvs_ref
        The dvs reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec).
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the reconfigure
    task to complete.

    portgroup_ref
        The portgroup reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec) to apply.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log message typo: 'portgrouo' -> 'portgroup'
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The names of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicating whether to return all networks in the parent.
        Default is False. Takes precedence over ``network_names``.

    Raises ``ArgumentValueError`` if the parent is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks are reached through networkFolder -> childEntity
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Flatten the retrieved property dicts down to just the object names
    return [item['name'] for item in
            get_mors_with_properties(service_instance, vim_object, properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises ``VMwareObjectRetrievalError`` if the manager wasn't retrieved.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license and returns the resulting license info object.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # Attach the description as the vSphere client's license label
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity_ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging and, for the vCenter case, to
        cross-check the assignment. Required; an ``ArgumentValueError``
        is raised when missing.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        # The vCenter is identified by its instance UUID, not a moid
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter query is expected to return exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    # For the vCenter case, verify the assignment belongs to the expected
    # vCenter (guards against querying the wrong instance)
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license info
    object.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter is identified by its instance UUID
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before re-raising, consistent with the other handlers
            # in this module (was previously missing here).
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Return the names of all datacenters on a given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    datacenter_type = vim.Datacenter
    return list_objects(service_instance, datacenter_type)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False. Takes precedence over ``datacenter_names``.
    '''
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items
def get_datacenter(service_instance, datacenter_name):
    '''
    Return the vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name.

    Raises ``VMwareObjectRetrievalError`` when no datacenter matches.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if matches:
        return matches[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder and returns its reference.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object.

    datacenter_name
        The datacenter name.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference.

    cluster
        The name of the cluster to be retrieved.

    Raises ``VMwareObjectRetrievalError`` when the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's host folder:
    # hostFolder -> childEntity
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx). Required.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster and waits for the reconfigure task to complete.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx) to apply. Required.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True applies the spec incrementally instead of replacing
        # the whole cluster configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Return the names of all clusters on a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    cluster_type = vim.ClusterComputeResource
    return list_objects(service_instance, cluster_type)
def list_datastore_clusters(service_instance):
    '''
    Return the names of all datastore clusters (storage pods) on a given
    service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    storage_pod_type = vim.StoragePod
    return list_objects(service_instance, storage_pod_type)
def list_datastores(service_instance):
    '''
    Return the names of all datastores on a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastore_type = vim.Datastore
    return list_objects(service_instance, datastore_type)
def list_datastores_full(service_instance):
    '''
    Return a dict mapping each datastore name on the service instance to its
    basic information: name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {
        ds_name: list_datastore_full(service_instance, ds_name)
        for ds_name in list_objects(service_instance, vim.Datastore)
    }
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity (MiB), free (MiB), used (MiB), usage (percent),
    hosts (list of host names mounting the datastore).

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises ``VMwareObjectRetrievalError`` if the datastore doesn't exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against ZeroDivisionError on datastores reporting 0 capacity
    # (e.g. inaccessible/unmounted datastores)
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key renders as "'vim.HostSystem:host-123'"; extract the moid
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get a reference to the first object of the specified type with the
    specified name, or None when no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next(
        (entry for entry in container.view if entry.name == obj_name),
        None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search.

    datastores
        Names of the datastores to search.

    container_object
        The base object for searches.

    browser_spec
        BrowserSpec object which defines the search criteria.

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore without the directory simply contributes no results
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses.

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None.

    get_all_datastores
        Specifies whether to retrieve all datastores visible to the
        reference. Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        # Resolve each backing disk id to the name of the VMFS datastore
        # whose extents live on that disk, then fold those names into the
        # datastore name filter.
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:

            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore to rename

    new_datastore_name
        The new name to assign to the datastore

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    current_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", current_name,
              new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        msg = ('Not enough permissions. Required privilege: '
               '{}'.format(exc.privilegeId))
        raise salt.exceptions.VMwareApiError(msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns the vim.HostStorageSystem of an ESXi host.

    service_instance
        The Service Instance used for the property retrieval.

    host_ref
        Reference to the ESXi host (vim.HostSystem).

    hostname
        Name of the host; looked up from ``host_ref`` when omitted.

    Raises VMwareObjectRetrievalError when the storage system cannot be
    retrieved.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    # Traverse from the host object to its storage system
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return entries[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns the vim.HostDiskPartitionInfo describing the partitions on the
    device found at ``device_path``.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    try:
        infos = storage_system.RetrieveDiskPartitionInfo(
            devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Only one device path was passed in, so only the first entry is relevant
    log.trace('partition_info = %s', infos[0])
    return infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The host's vim.HostStorageSystem used to compute the layout.

    device_path
        Path of the device the partition is added to.

    partition_info
        The device's current vim.HostDiskPartitionInfo.

    Raises VMwareObjectNotFoundError if the disk has no free partition,
    VMwareNotFoundError if the newly computed partition cannot be identified,
    VMwareApiError/VMwareRuntimeError on API faults.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition at the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed: the original used a str.format-style '{0}' placeholder with
    # logging's lazy %-args, so the value was never interpolated
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore spanning a whole scsi disk; returns the
    reference to the new datastore.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDisk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved here if not provided.

    Raises VMwareApiError/VMwareRuntimeError on API faults.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    # Compute a partition layout that turns the disk's free space into a
    # single new vmfs partition
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns the vim.HostDatastoreSystem of an ESXi host.

    host_ref
        Reference to the ESXi host (vim.HostSystem).

    hostname
        Name of the host; looked up from ``host_ref`` when omitted.

    Raises VMwareObjectRetrievalError when the datastore system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host object to its datastore system
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return entries[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore using the datastore system of one of its attached
    hosts.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises VMwareApiError if the datastore has no attached hosts or on API
    faults, VMwareRuntimeError on runtime faults.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    attached_hosts = ds_props.get('host')
    if not attached_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # The removal is performed through the first attached host
    first_host_ref = attached_hosts[0].key
    hostname = get_managed_object_name(first_host_ref)
    host_ds_system = get_host_datastore_system(first_host_ref,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    properties = ['name']
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # 'parent' is needed to test cluster membership; cluster
            # existence only makes sense when the datacenter is specified
            properties.append('parent')
    else:
        # No datacenter given; search from the inventory root folder
        start_point = get_root_folder(service_instance)
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    selected_hosts = []
    for host in hosts:
        # Keep a host only if it passes the cluster-membership filter (when
        # requested) and either all hosts were asked for or its name matches
        if cluster_name:
            parent = host['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host['name'] in host_names:
            selected_hosts.append(host['object'])
    return selected_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.

        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None; retrieved here if omitted.

    hostname
        Name of the host. Default is None; looked up from ``host_ref``.

    Raises VMwareObjectRetrievalError if device/multipath/lun info is missing,
    VMwareApiError/VMwareRuntimeError on API faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    # Each lun can be reached through multiple paths; every path name maps
    # back to the same lun key
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all scsi luns (vim.ScsiLun objects, disks included)
    on an ESXi host; an empty list if none were found.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None; retrieved here if omitted.

    hostname
        Name of the host. This argument is optional.

    Raises VMwareObjectRetrievalError if the storage system or device info
    is missing, VMwareApiError/VMwareRuntimeError on API faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    if not storage_system:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # scsi address -> lun key
    lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        si, host_ref, storage_system, hostname)
    # lun key -> lun object
    lun_by_key = {lun.key: lun
                  for lun in get_all_luns(host_ref, storage_system, hostname)}
    # Join the two maps on the lun key
    return {scsi_addr: lun_by_key[lun_key]
            for scsi_addr, lun_key in six.iteritems(lun_key_by_scsi_addr)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # No filters given and not asking for everything: nothing to return
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    # Keep only the scsi disks matching either filter (or all of them)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk (a vim.HostDiskPartitionInfo object).

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are retrieved

    storage_system
        The ESXi host's storage system. Default is None; retrieved here if
        omitted.

    Raises VMwareObjectRetrievalError if no devices were found or the disk
    is not present on the host.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the scsi disk with the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError if the devices or the disk cannot be
    found, VMwareApiError/VMwareRuntimeError on API faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the scsi disk with the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.

    Raises VMwareObjectRetrievalError if vsan host config or storage info is
    missing, VMwareApiError/VMwareRuntimeError on API faults.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # No cache disk filter and not asking for everything: nothing to do
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (single) cache disk's canonical name
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that a disk group's cache disk and capacity disks match the
    expected canonical names; raises ArgumentValueError on any mismatch and
    returns True otherwise.
    '''
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    actual_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_ids = sorted(capacity_disk_ids)
    if actual_ids != expected_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_ids, expected_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the host cache configuration entry if a host cache is configured
    on the specified host, otherwise None.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if host_cache_manager:
        # A cache manager was supplied; read its config info directly
        props = get_properties_of_managed_object(host_cache_manager,
                                                 ['cacheConfigurationInfo'])
        if not props:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return props['cacheConfigurationInfo'][0]
    # No cache manager supplied; traverse from the host to find it
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.cacheConfigurationManager',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostCacheConfigurationManager,
                                       ['cacheConfigurationInfo'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results or not results[0].get('cacheConfigurationInfo'):
        log.trace('Host \'%s\' has no host cache', hostname)
        return None
    return results[0]['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host; returns True on success.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method

    Raises VMwareObjectRetrievalError if the host has no cache configuration
    manager, VMwareApiError/VMwareRuntimeError on API faults.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the configuration task completes (raises on task failure)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns the list of hosts known to the given service instance.

    service_instance
        The Service Instance Object to query for hosts.
    '''
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean

    return
        List of resourcepool managed object references

    Raises VMwareObjectRetrievalError when no matching resource pool was
    found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Restrict the search to the datacenter if one was given, otherwise
    # search from the inventory root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Fixed: the error previously reported selected_pools (always [] at
        # this point) instead of the names that were actually requested
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns the list of resource pools known to the given service instance.

    service_instance
        The Service Instance Object to query for resource pools.
    '''
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Returns the list of networks known to the given service instance.

    service_instance
        The Service Instance Object to query for networks.
    '''
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Returns the list of virtual machines known to the given service instance.

    service_instance
        The Service Instance Object to query for VMs.
    '''
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Returns the list of folders known to the given service instance.

    service_instance
        The Service Instance Object to query for folders.
    '''
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Returns the list of distributed virtual switches known to the given
    service instance.

    service_instance
        The Service Instance Object to query for distributed virtual switches.
    '''
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Returns the list of vApps known to the given service instance.

    service_instance
        The Service Instance Object to query for vApps.
    '''
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Returns the list of distributed virtual portgroups known to the given
    service instance.

    service_instance
        The Service Instance Object to query for distributed virtual
        portgroups.
    '''
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed; returns the task result on success and
    translates task faults into salt VMware exceptions on failure.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.
        NOTE(review): this value only throttles how often the waiting message
        is logged; the loop itself always sleeps ~1 second per iteration.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll the task until it leaves the 'running'/'queued' states
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep up to the next whole-second boundary relative to start_time
        # so iterations stay aligned to a ~1 second cadence
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state: re-raise the task's fault so it can be
        # translated into the appropriate salt exception; any other fault
        # type propagates unchanged
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.
    '''
    if datacenter and not parent_ref:
        # Scope the search to the given datacenter when no explicit
        # container reference was supplied.
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set covering identity, storage, network and
        # runtime state of the virtual machine.
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Fixed message: the previous implicit literal concatenation
        # produced "with thesame name" (missing space).
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the same name, '
            'please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Clone case: place the new VM in the same folder as the base VM.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this path fell through and raised an UnboundLocalError
        # on the return statement; raise an explicit, descriptive error.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The folder placement of the virtual machine could not be determined')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object to access vCenter

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The host does not expose 'resourcePool' directly; traverse
            # through its parent compute resource to find the pool instead.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: the message previously formatted placement['host'],
            # which is not present in this branch and raised a KeyError
            # instead of the intended error.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a long integer.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    # Multipliers that map a unit to its kibibyte factor; vCenter expects
    # a long value in KB.
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    factor = multipliers.get(unit.lower())
    if factor is None:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': int(size * factor), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers a virtual machine on or off and waits for the operation to finish.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine

    return
        The virtual machine object that was power cycled
    '''
    # Validate the requested action up front and pick the matching
    # power operation and task label.
    if action == 'on':
        task_name = 'power on'
    elif action == 'off':
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = virtual_machine.PowerOn() if action == 'on' \
            else virtual_machine.PowerOff()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from the given config spec.

    vm_name
        Name of the virtual machine to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Optional host object where the machine will be placed

    return
        Virtual Machine managed object reference
    '''
    # Only pass the host when a genuine vim.HostSystem was supplied.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object
    '''
    # The host argument is optional; build the call arguments accordingly.
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Reconfigures a virtual machine with the given config spec and returns
    the reconfiguration task's result.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to apply
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(reconfig_task, vm_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine and waits for the destroy task to finish.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine, removing it from the inventory
    without deleting its files. (Docstring fixed: this function does not
    destroy the VM; see ``delete_vm`` for that.)

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Log message fixed: previously said 'Destroying vm', a copy-paste
    # leftover from delete_vm.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # log.exception added for consistency with every sibling handler.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_datastore_files
|
python
|
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
'''
Get the files with a given browser specification from the datastore.
service_instance
The Service Instance Object from which to obtain datastores.
directory
The name of the directory where we would like to search
datastores
Name of the datastores
container_object
The base object for searches
browser_spec
BrowserSpec object which defines the search criteria
return
list of vim.host.DatastoreBrowser.SearchResults objects
'''
files = []
datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
for datobj in datastore_objects:
try:
task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
searchSpec=browser_spec)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
try:
files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
except salt.exceptions.VMwareFileNotFoundError:
pass
return files
|
Get the files with a given browser specification from the datastore.
service_instance
The Service Instance Object from which to obtain datastores.
directory
The name of the directory where we would like to search
datastores
Name of the datastores
container_object
The base object for searches
browser_spec
BrowserSpec object which defines the search criteria
return
list of vim.host.DatastoreBrowser.SearchResults objects
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2037-L2081
|
[
"def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):\n '''\n Waits for a task to be completed.\n\n task\n The task to wait for.\n\n instance_name\n The name of the ESXi host, vCenter Server, or Virtual Machine that\n the task is being run on.\n\n task_type\n The type of task being performed. Useful information for debugging purposes.\n\n sleep_seconds\n The number of seconds to wait before querying the task again.\n Defaults to ``1`` second.\n\n log_level\n The level at which to log task information. Default is ``debug``,\n but ``info`` is also supported.\n '''\n time_counter = 0\n start_time = time.time()\n log.trace('task = %s, task_type = %s', task, task.__class__.__name__)\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n while task_info.state == 'running' or task_info.state == 'queued':\n if time_counter % sleep_seconds == 0:\n msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n time.sleep(1.0 - ((time.time() - start_time) % 1.0))\n time_counter += 1\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. 
Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n if task_info.state == 'success':\n msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n # task is in a successful state\n return task_info.result\n else:\n # task is in an error state\n try:\n raise task_info.error\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.fault.SystemError as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareSystemError(exc.msg)\n except vmodl.fault.InvalidArgument as exc:\n log.exception(exc)\n exc_message = exc.msg\n if exc.faultMessage:\n exc_message = '{0} ({1})'.format(exc_message,\n exc.faultMessage[0].message)\n raise salt.exceptions.VMwareApiError(exc_message)\n",
"def get_datastores(service_instance, reference, datastore_names=None,\n backing_disk_ids=None, get_all_datastores=False):\n '''\n Returns a list of vim.Datastore objects representing the datastores visible\n from a VMware object, filtered by their names, or the backing disk\n cannonical name or scsi_addresses\n\n service_instance\n The Service Instance Object from which to obtain datastores.\n\n reference\n The VMware object from which the datastores are visible.\n\n datastore_names\n The list of datastore names to be retrieved. Default value is None.\n\n backing_disk_ids\n The list of canonical names of the disks backing the datastores\n to be retrieved. Only supported if reference is a vim.HostSystem.\n Default value is None\n\n get_all_datastores\n Specifies whether to retrieve all disks in the host.\n Default value is False.\n '''\n obj_name = get_managed_object_name(reference)\n if get_all_datastores:\n log.trace('Retrieving all datastores visible to \\'%s\\'', obj_name)\n else:\n log.trace('Retrieving datastores visible to \\'%s\\': names = (%s); '\n 'backing disk ids = (%s)',\n obj_name, datastore_names, backing_disk_ids)\n if backing_disk_ids and not isinstance(reference, vim.HostSystem):\n\n raise salt.exceptions.ArgumentValueError(\n 'Unsupported reference type \\'{0}\\' when backing disk filter '\n 'is set'.format(reference.__class__.__name__))\n if (not get_all_datastores) and backing_disk_ids:\n # At this point we know the reference is a vim.HostSystem\n log.trace('Filtering datastores with backing disk ids: %s',\n backing_disk_ids)\n storage_system = get_storage_system(service_instance, reference,\n obj_name)\n props = salt.utils.vmware.get_properties_of_managed_object(\n storage_system, ['fileSystemVolumeInfo.mountInfo'])\n mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])\n disk_datastores = []\n # Non vmfs volumes aren't backed by a disk\n for vol in [i.volume for i in mount_infos if\n isinstance(i.volume, vim.HostVmfsVolume)]:\n\n if 
not [e for e in vol.extent if e.diskName in backing_disk_ids]:\n # Skip volume if it doesn't contain an extent with a\n # canonical name of interest\n continue\n log.trace('Found datastore \\'%s\\' for disk id(s) \\'%s\\'',\n vol.name, [e.diskName for e in vol.extent])\n disk_datastores.append(vol.name)\n log.trace('Datastore found for disk filter: %s', disk_datastores)\n if datastore_names:\n datastore_names.extend(disk_datastores)\n else:\n datastore_names = disk_datastores\n\n if (not get_all_datastores) and (not datastore_names):\n log.trace('No datastore to be filtered after retrieving the datastores '\n 'backed by the disk id(s) \\'%s\\'', backing_disk_ids)\n return []\n\n log.trace('datastore_names = %s', datastore_names)\n\n # Use the default traversal spec\n if isinstance(reference, vim.HostSystem):\n # Create a different traversal spec for hosts because it looks like the\n # default doesn't retrieve the datastores\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n name='host_datastore_traversal',\n path='datastore',\n skip=False,\n type=vim.HostSystem)\n elif isinstance(reference, vim.ClusterComputeResource):\n # Traversal spec for clusters\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n name='cluster_datastore_traversal',\n path='datastore',\n skip=False,\n type=vim.ClusterComputeResource)\n elif isinstance(reference, vim.Datacenter):\n # Traversal spec for datacenter\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n name='datacenter_datastore_traversal',\n path='datastore',\n skip=False,\n type=vim.Datacenter)\n elif isinstance(reference, vim.StoragePod):\n # Traversal spec for datastore clusters\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n name='datastore_cluster_traversal',\n path='childEntity',\n skip=False,\n type=vim.StoragePod)\n elif isinstance(reference, vim.Folder) and \\\n get_managed_object_name(reference) == 'Datacenters':\n # Traversal of root folder (doesn't support 
multiple levels of Folders)\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n path='childEntity',\n selectSet=[\n vmodl.query.PropertyCollector.TraversalSpec(\n path='datastore',\n skip=False,\n type=vim.Datacenter)],\n skip=False,\n type=vim.Folder)\n else:\n raise salt.exceptions.ArgumentValueError(\n 'Unsupported reference type \\'{0}\\''\n ''.format(reference.__class__.__name__))\n\n items = get_mors_with_properties(service_instance,\n object_type=vim.Datastore,\n property_list=['name'],\n container_ref=reference,\n traversal_spec=traversal_spec)\n log.trace('Retrieved %s datastores', len(items))\n items = [i for i in items if get_all_datastores or i['name'] in\n datastore_names]\n log.trace('Filtered datastores: %s', [i['name'] for i in items])\n return [i['object'] for i in items]\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli commmand, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Apply the default port and protocol when none were supplied.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    # When esxi_host is set, 'host' is a vCenter and the command must be
    # routed to the ESXi machine via '-h'; otherwise we talk to 'host'
    # directly and the extra flag is omitted.
    # NOTE(review): pwd is interpolated into a shell string in single
    # quotes; a password containing a quote would break the command.
    target_flag = ' -h {0}'.format(esxi_host) if esxi_host else ''
    esx_cmd += ' -s {0}{1} -u {2} -p \'{3}\' ' \
               '--protocol={4} --portnumber={5} {6}'.format(host,
                                                            target_flag,
                                                            user,
                                                            pwd,
                                                            protocol,
                                                            port,
                                                            cmd)

    return salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username / password
        Credentials; both mandatory when mechanism is ``userpass``.

    protocol / port
        Connection protocol and port to use.

    mechanism
        pyVmomi connection mechanism, either ``userpass`` or ``sspi``.

    principal / domain
        Kerberos service principal and user domain; both mandatory when
        mechanism is ``sspi``.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Fixed: exc.message does not exist on Python 3 exceptions and would
        # raise an AttributeError inside this handler; use six.text_type(exc).
        if 'unexpected keyword argument' in six.text_type(exc):
            log.error('Initial connect to the VMware endpoint failed with %s',
                      six.text_type(exc))
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # Retry with certificate verification disabled.
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: explicitly build an unverified TLSv1 context.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is closed when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of customizing a clone

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Look the spec up by name through the customization spec manager.
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    # Return the first matching entity, or None when nothing matches.
    matches = (entity for entity in container.view if entity.name == obj_name)
    return next(matches, None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Reuse pyVmomi's module-level cached service instance when possible.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
                (hasattr(stub, 'host') and
                 stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and authenticate again from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # Python 2.7.9+ enforces strict SSL handshaking by default; relax
    # hostname checking and client-side cert verification so the new stub
    # can reuse the already-established session.
    ssl_context = None
    if sys.version_info[:3] > (2, 7, 8):
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE

    old_stub = service_instance._stub
    hostname = old_stub.host.split(':')[0]
    # Propagate the authenticated vCenter session cookie to the new stub.
    session_cookie = old_stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=ssl_context)
    new_stub.cookie = old_stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # A ServiceInstance sharing the managed object's SOAP stub talks to the
    # same authenticated vCenter/ESXi session.
    service_instance = vim.ServiceInstance('ServiceInstance')
    service_instance._stub = mo_ref._stub
    return service_instance
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    # Translate pyVmomi faults into salt exceptions; NoPermission must be
    # handled before its VimFault base class.
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' -> vCenter, 'HostAgent' -> standalone ESXi host.
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    # The AboutInfo object lives directly on the service content.
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None if no DVS by that name exists
    '''
    # Cheap name check first; only build a container view when the name is
    # known to exist.
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    for candidate in container.view:
        if candidate.name == dvs_name:
            return candidate
    return None
def _get_pnics(host_reference):
    '''
    Helper function that returns a list of PhysicalNics and their information.
    '''
    # Physical NICs live under the host's network configuration.
    return host_reference.config.network.pnic
def _get_vnics(host_reference):
    '''
    Helper function that returns a list of VirtualNics and their information.
    '''
    # Virtual (vmkernel) NICs live under the host's network configuration.
    return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
    '''
    Helper function that returns a list of Virtual NicManagers
    and their information.
    '''
    # The virtual NIC manager is exposed through the host's configManager.
    return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None when no portgroup matches
    '''
    # Return the first portgroup whose name matches, or None.
    return next((pg for pg in dvs.portgroup if pg.name == portgroup_name),
                None)
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return the portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None when no portgroup matches
    '''
    # The lookup is identical to _get_dvs_portgroup (the original body was a
    # byte-for-byte duplicate), so delegate to it; this entry point is kept
    # for call-site clarity at uplink-portgroup call sites.
    return _get_dvs_portgroup(dvs, portgroup_name)
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    Returns the base64-encoded token on success; raises ImportError when the
    gssapi library is unavailable, or CommandExecutionError when no token
    could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    # NOTE(review): in_token is never reassigned inside the loop, so this
    # performs at most one handshake step: either the first step() yields a
    # token (returned below) or the 'no response' error is raised — TODO
    # confirm a single round trip is sufficient for the targeted servers.
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            # Python 3: step() may return text; ensure bytes before encoding.
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only direct ESXi host connections (apiType 'HostAgent') expose per-host
    # hardware details; any other connection returns an empty grain dict.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(
            service_instance.RetrieveContent().rootFolder,
            [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the single HostSystem the HostAgent manages.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                # Only the 'ServiceTag' identifier is mapped to a grain.
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is reported in bytes; grains use MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Vmkernel NICs provide the per-interface IP and MAC data.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host + '.' + domain, with the dot omitted if domain is
            # empty.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NICs contribute MAC addresses as well.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the local reference to the container view.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    # RetrieveContent() returns the ServiceContent (inventory root, managers).
    return service_instance.RetrieveContent()
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    # Translate pyVmomi faults into salt exceptions; NoPermission must be
    # handled before its VimFault base class.
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view
    # (only if this function created the container view itself above)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Scan all managed object references exposing the requested property and
    # return the first whose property (or stringified moref id) matches.
    for candidate in get_mors_with_properties(service_instance,
                                              object_type,
                                              property_list=[property_name],
                                              container_ref=container_ref):
        candidate_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == candidate_id:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specigying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    # A dropped connection can surface as BadStatusLine or a broken pipe;
    # retry the content retrieval exactly once in those cases.
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)

    # Flatten each result's propSet into a plain dict, keeping the managed
    # object reference under the 'object' key.
    object_list = []
    for obj in content:
        entry = {prop.name: prop.val for prop in obj.propSet}
        entry['object'] = obj.obj
        object_list.append(entry)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimally.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # Fetch the object's name first, purely to produce friendlier log and
    # error messages below.
    try:
        mo_name = get_mors_with_properties(service_instance,
                                           type(mo_ref),
                                           container_ref=mo_ref,
                                           property_list=['name'],
                                           local_properties=True)[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    results = get_mors_with_properties(service_instance,
                                       type(mo_ref),
                                       container_ref=mo_ref,
                                       property_list=properties,
                                       local_properties=True)
    if not results:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return results[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.
    If the name wasn't found, it returns None.

    mo_ref
        The managed object reference.
    '''
    # Retrieve only the 'name' property; missing name yields None.
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return the network adapter type.

    adapter_type
        The adapter type name from which to obtain the network adapter object.
    '''
    # Map the adapter type name to its pyVmomi virtual device class.
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type.

    adapter_object
        The adapter object from which to obtain the network adapter type.
    '''
    # Ordered checks: the vmxnet2/vmxnet3 subclasses must be tested before
    # the vmxnet base class, and e1000e before e1000.
    adapter_types = [
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    ]
    for device_cls, type_name in adapter_types:
        if isinstance(adapter_object, device_cls):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        # Keep everything when get_all_dvss is set; otherwise filter by name.
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter

    dc_ref
        The datacenter reference whose network folder is returned.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Only follow the datacenter's 'networkFolder' property.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete. Note: the created DVS reference is not
    returned to the caller.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Build a minimal spec when none is supplied; the spec's config name is
    # always forced to dvs_name.
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
    dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Block until vCenter reports the creation task finished.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Block until vCenter reports the reconfigure task finished.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (network I/O control) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual porgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the dvss to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Traverse datacenter -> networkFolder -> childEntity.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # Parent is a distributed virtual switch; follow its 'portgroup'
        # property directly.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs)

    dvs_ref
        The dvs reference
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup carries the system tag 'SYSTEM/DVS.UPLINKPG';
    # return the first portgroup tagged that way.
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            return entry['object']
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Block until vCenter reports the creation task finished.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the reconfigure
    task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfigure task finished.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Block until vCenter reports the destroy task finished.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The names of the standard switch networks to return. Default is None.

    get_all_networks
        Boolean indicating whether to return all networks in the parent.
        Default is False.

    Raises ArgumentValueError if parent_ref is not a vim.Datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to reach the
    # network objects (the default traversal doesn't descend this path).
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    return [entry['name'] for entry in
            get_mors_with_properties(service_instance, vim_object, properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises VMwareApiError on permission/API faults, VMwareRuntimeError on
    vmodl runtime faults, and VMwareObjectRetrievalError if the manager is
    not available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
                service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Returns the added vim.LicenseManagerLicenseInfo object.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    label = vim.KeyValue()
    # 'VpxClientLicenseLabel' is the label key the vSphere client uses to
    # display the license description.
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Returns a list of vim.LicenseManagerLicenseInfo objects.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
                license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid lookup) is expected to have exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        # Fixed log message typo: 'Unexpectectedly' -> 'Unexpectedly'
        log.trace('Unexpectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.

    Returns the assigned vim.LicenseManagerLicenseInfo object.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Added log.exception for consistency with the other fault
            # handlers in this module (the traceback was being dropped).
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name

    Raises VMwareObjectRetrievalError when no datacenter matches.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if matches:
        return matches[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name

    Returns the created vim.Datacenter object.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The name of the cluster to be retrieved

    Raises VMwareObjectRetrievalError when the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Traverse datacenter -> hostFolder -> childEntity to reach the clusters
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx). Required.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx). Required.

    Blocks until the reconfigure task completes.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster (storage pod) names associated with a
    given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastore names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dict keyed by datastore name with the basic information for
    each datastore associated with a given service instance:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Sizes are reported by the API in bytes; convert to MiB here
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Usage as a percentage of total capacity
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies as '<type>:<moid>'; keep only the moid part
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get a reference to the first object of the specified type with the
    specified name, or None if no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    view = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((obj for obj in view.view if obj.name == obj_name), None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get a reference to the first object of the specified type with the
    specified managed object id, or None if no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    view = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((obj for obj in view.view if obj._moId == obj_moid), None)
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.
        Supported types: vim.HostSystem, vim.ClusterComputeResource,
        vim.Datacenter, vim.StoragePod and the root 'Datacenters' folder.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:

            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Datastore names resolved from disks are merged into the name filter
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host, used for logging/errors. Retrieved from host_ref if
        not provided.

    Raises VMwareObjectRetrievalError if the storage system is not found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    # NOTE(review): 'systemFile' looks like an unusual property to request
    # here; only the managed object reference is actually used — confirm.
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo.

    storage_system
        The host's storage system (vim.HostStorageSystem).

    device_path
        Path of the device to inspect.
    '''
    try:
        partition_infos = \
                storage_system.RetrieveDiskPartitionInfo(
                    devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A single device path was passed, so only the first entry is relevant
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec).

    storage_system
        The host's storage system (vim.HostStorageSystem).

    device_path
        Path of the device the partition is created on.

    partition_info
        Current partition layout (vim.HostDiskPartitionInfo) of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed broken placeholder: logging uses %-style lazy formatting, so the
    # original '{0}' was never interpolated into the message.
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDisk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's storage system (vim.HostStorageSystem). Retrieved from
        host_ref if not provided.

    Returns the created vim.Datastore reference.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # The new partition consumes the remaining free space on the disk
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
                host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises VMwareObjectRetrievalError if the datastore system is not found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The datastore is unmounted via the datastore system
    of the first host it is attached to.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises VMwareApiError if the datastore has no attached hosts or on
    permission/API faults, and VMwareRuntimeError on vmodl runtime faults.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>
    service_instance
        The Service Instance Object from which to obtain the hosts
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    storage_system
        The host's storage system. Default is None (retrieved from the host).
    hostname
        Name of the host. Default is None (retrieved from ``host_ref``).
    Raises ``VMwareApiError``/``VMwareRuntimeError`` on vCenter faults and
    ``VMwareObjectRetrievalError`` when the storage device, multipath or lun
    information is missing.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    # NoPermission is handled before the more generic VimFault on purpose
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiLun objects on an ESXi host
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    storage_system
        The host's storage system. Default is None.
    hostname
        Name of the host. This argument is optional.
    Returns an empty list when the host reports no scsi luns.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        # The storage system was not passed in; retrieve it and fail
        # explicitly if the host doesn't expose one
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    storage_system
        The host's storage system. Default is None.
    hostname
        Name of the host. This argument is optional.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # scsi address -> lun key
    lun_key_by_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                       storage_system,
                                                       hostname)
    # lun key -> lun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Compose the two maps: scsi address -> lun object
    lun_by_addr = {}
    for scsi_addr, lun_key in six.iteritems(lun_key_by_addr):
        lun_by_addr[scsi_addr] = lun_by_key[lun_key]
    return lun_by_addr
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None
    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None
    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Nothing to filter on and not retrieving everything
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    # Keep only HostScsiDisk luns matching either filter (or everything
    # when get_all_disks is set)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk
    host_ref
        The reference of the ESXi host containing the disk
    disk_id
        The canonical name of the disk whose partitions are to be retrieved
    storage_system
        The ESXi host's storage system. Default is None.
    Raises ``VMwareObjectRetrievalError`` if the host reports no devices or
    the requested disk is not among them.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Narrow the device list down to the disk with the requested id
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec
    service_instance
        The Service Instance Object from which to obtain all information
    host_ref
        The reference of the ESXi host containing the disk
    disk_id
        The canonical name of the disk whose partitions are to be removed
    hostname
        The ESXi hostname. Default is None.
    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # Retrieve the host's scsi luns through the host's storage system
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Narrow the device list down to the disk with the requested id
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their canonical names.
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.
    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # No filter and not retrieving everything: nothing to do
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (single) ssd/cache disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    # The cache disk must be the expected one
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # The capacity disks must match the expected set (order irrelevant)
    actual_capacity_ids = sorted([d.canonicalName for d in disk_group.nonSsd])
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the host cache configuration info if the host cache is configured
    on the specified host, otherwise returns None
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        # Guard the property lookup the same way as the branch above;
        # previously a missing/empty 'cacheConfigurationInfo' here raised
        # KeyError/IndexError instead of reporting "no host cache"
        if not results or not results.get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.
    swap_size_MiB
        The size in Mibibytes of the swap.
    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    Returns True on success; raises salt VMware exceptions on API faults.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the configuration task completes (raises on task error)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    # Thin wrapper: delegates to list_objects for vim.HostSystem
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects
    service_instance
        The service instance object to query the vCenter
    resource_pool_names
        Resource pool names
    datacenter_name
        Name of the datacenter where the resource pool is available
    get_all_resource_pools
        Boolean - if True, return every resource pool in the container
    return
        List of resource pool managed object references
    Raises ``VMwareObjectRetrievalError`` if no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the names that were requested; the previous code formatted
        # the (necessarily empty) result list, always printing 'names=[]'
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    # Thin wrapper: delegates to list_objects for vim.ResourcePool
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    # Thin wrapper: delegates to list_objects for vim.Network
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    # Thin wrapper: delegates to list_objects for vim.VirtualMachine
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    # Thin wrapper: delegates to list_objects for vim.Folder
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    # Thin wrapper: delegates to list_objects for vim.DistributedVirtualSwitch
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    # Thin wrapper: delegates to list_objects for vim.VirtualApp
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    # Thin wrapper: delegates to list_objects for
    # vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.
    task
        The task to wait for.
    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.
    task_type
        The type of task being performed. Useful information for debugging purposes.
    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.
    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    Returns the task result on success; on task failure re-raises the task's
    error translated into the corresponding salt VMware exception.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll the task until it leaves the 'running'/'queued' states
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep the remainder of the current wall-clock second so that
        # time_counter tracks whole elapsed seconds
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault so it can be
        # translated into the matching salt exception below
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.
    service_instance
        Service instance object to access vCenter
    name
        Name of the virtual machine.
    datacenter
        Datacenter name
    vm_properties
        List of vm properties.
    traversal_spec
        Traversal Spec object(s) for searching.
    parent_ref
        Container Reference object for searching under a given object.
    Raises ``VMwareObjectRetrievalError`` if no VM matches and
    ``VMwareMultipleObjectsError`` if more than one matches.
    '''
    if datacenter and not parent_ref:
        # Restrict the search to the given datacenter
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # NOTE: the previous implicit string concatenation produced the
        # garbled message '...with thesame name...'
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the same name, '
            'please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object
    service_instance
        Service instance object
    datacenter
        Name of the datacenter
    placement
        Placement dictionary; a 'folder' key selects an explicit folder
    base_vm_name
        Existing virtual machine name (for cloning); takes precedence, the
        VM's parent folder is returned
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Cloning: use the parent folder of the base virtual machine
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        # Explicitly requested folder
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this case fell through and raised UnboundLocalError
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The datacenter or the placement folder must be specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.
    service_instance
        Service instance object
    datacenter
        Name of the datacenter
    placement
        Dictionary with the placement info: cluster, host or resourcepool name
    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if placement is None:
        # The membership tests below would raise TypeError on None
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The host has no direct resourcePool property; look it up
            # through the parent compute resource instead
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # NOTE: the previous message formatted placement['host'], which
            # raised KeyError in this branch and named the wrong entity
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a long integer.
    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB
    size
        Number which represents the size
    '''
    # Multipliers from each supported unit down to kibibytes
    unit_multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    multiplier = unit_multipliers.get(unit.lower())
    if multiplier is None:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    # vCenter needs long value
    return {'size': int(size * multiplier), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.
    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine
    action
        Operation option to power on/off the machine
    '''
    if action not in ('on', 'off'):
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        # Trigger the requested power operation; both branches share the
        # same fault handling below
        if action == 'on':
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        else:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(
            'An error occurred during power operation, '
            'a file was not found: {0}'.format(exc))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec
    vm_name
        Virtual machine name to be created
    vm_config_spec
        Virtual Machine Config Spec object
    folder_object
        vm Folder managed object reference
    resourcepool_object
        Resource pool object where the machine will be created
    host_object
        Host object where the machine will be placed (optional)
    return
        Virtual Machine managed object reference
    '''
    try:
        # Only pass the host when a valid vim.HostSystem was provided
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll every 10s at 'info' level; creation can take a while
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file; on
    success returns the vim.VirtualMachine managed object reference.

    datacenter
        Datacenter of the virtual machine, vim.Datacenter object.

    name
        Name of the virtual machine.

    vmx_path
        Full path to the vmx file, datastore name should be included.

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object.

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional).
    '''
    # Assemble keyword arguments once; 'host' is only passed when provided.
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as err:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(err))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Reconfigures a virtual machine with the given config spec object.

    vm_ref
        Virtual machine managed object reference.

    vm_config_spec
        Virtual machine config spec object to apply.
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return wait_for_task(reconfig_task, name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine and its files on the datastore.

    vm_ref
        Managed object reference of a virtual machine object.
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(destroy_task, name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory.

    Unlike ``delete_vm``, this does not delete the machine's files from the
    datastore; it only removes the machine from the inventory.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed copy-pasted 'Destroying vm' message: this function unregisters.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before re-raising, consistent with the other VM helpers above.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_datastores
|
python
|
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.
        The caller's list is never mutated.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        # Call the sibling helper directly instead of via the
        # ``salt.utils.vmware`` module path (this *is* that module).
        props = get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # BUGFIX: build a new list instead of ``extend``-ing the caller's
            # list in place (the argument used to be mutated as a side
            # effect visible to the caller).
            datastore_names = datastore_names + disk_datastores
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Pick a traversal spec matching the reference type; the default
    # ``Traverse All`` spec does not reach datastores in all cases.
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
|
Returns a list of vim.Datastore objects representing the datastores visible
from a VMware object, filtered by their names, or the backing disk
canonical name or scsi_addresses
service_instance
The Service Instance Object from which to obtain datastores.
reference
The VMware object from which the datastores are visible.
datastore_names
The list of datastore names to be retrieved. Default value is None.
backing_disk_ids
The list of canonical names of the disks backing the datastores
to be retrieved. Only supported if reference is a vim.HostSystem.
Default value is None
get_all_datastores
Specifies whether to retrieve all disks in the host.
Default value is False.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2084-L2211
|
[
"def get_mors_with_properties(service_instance, object_type, property_list=None,\n container_ref=None, traversal_spec=None,\n local_properties=False):\n '''\n Returns a list containing properties and managed object references for the managed object.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n\n object_type\n The type of content for which to obtain managed object references.\n\n property_list\n An optional list of object properties used to return even more filtered managed object reference results.\n\n container_ref\n An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,\n ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory\n rootFolder.\n\n traversal_spec\n An optional TraversalSpec to be used instead of the standard\n ``Traverse All`` spec\n\n local_properties\n Flag specigying whether the properties to be retrieved are local to the\n container. If that is the case, the traversal spec needs to be None.\n '''\n # Get all the content\n content_args = [service_instance, object_type]\n content_kwargs = {'property_list': property_list,\n 'container_ref': container_ref,\n 'traversal_spec': traversal_spec,\n 'local_properties': local_properties}\n try:\n content = get_content(*content_args, **content_kwargs)\n except BadStatusLine:\n content = get_content(*content_args, **content_kwargs)\n except IOError as exc:\n if exc.errno != errno.EPIPE:\n raise exc\n content = get_content(*content_args, **content_kwargs)\n\n object_list = []\n for obj in content:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n properties['object'] = obj.obj\n object_list.append(properties)\n log.trace('Retrieved %s objects', len(object_list))\n return object_list\n",
"def get_properties_of_managed_object(mo_ref, properties):\n '''\n Returns specific properties of a managed object, retrieved in an\n optimally.\n\n mo_ref\n The managed object reference.\n\n properties\n List of properties of the managed object to retrieve.\n '''\n service_instance = get_service_instance_from_managed_object(mo_ref)\n log.trace('Retrieving name of %s', type(mo_ref).__name__)\n try:\n items = get_mors_with_properties(service_instance,\n type(mo_ref),\n container_ref=mo_ref,\n property_list=['name'],\n local_properties=True)\n mo_name = items[0]['name']\n except vmodl.query.InvalidProperty:\n mo_name = '<unnamed>'\n log.trace('Retrieving properties \\'%s\\' of %s \\'%s\\'',\n properties, type(mo_ref).__name__, mo_name)\n items = get_mors_with_properties(service_instance,\n type(mo_ref),\n container_ref=mo_ref,\n property_list=properties,\n local_properties=True)\n if not items:\n raise salt.exceptions.VMwareApiError(\n 'Properties of managed object \\'{0}\\' weren\\'t '\n 'retrieved'.format(mo_name))\n return items[0]\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n",
"def get_storage_system(service_instance, host_ref, hostname=None):\n '''\n Returns a host's storage system\n '''\n\n if not hostname:\n hostname = get_managed_object_name(host_ref)\n\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n path='configManager.storageSystem',\n type=vim.HostSystem,\n skip=False)\n objs = get_mors_with_properties(service_instance,\n vim.HostStorageSystem,\n property_list=['systemFile'],\n container_ref=host_ref,\n traversal_spec=traversal_spec)\n if not objs:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Host\\'s \\'{0}\\' storage system was not retrieved'\n ''.format(hostname))\n log.trace('[%s] Retrieved storage system', hostname)\n return objs[0]['object']\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    # Guard clause: bail out with the standard (False, reason) tuple when the
    # pyVmomi import at module load time failed.
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param protocol: Connection protocol; defaults to ``https``
    :param port: TCP port; defaults to ``443``
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: Dictionary (the ``cmd.run_all`` result dict), or ``False`` when
             the ``esxcli`` binary is not available
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    # NOTE(review): the credentials and credstore path below are interpolated
    # into a single shell command string wrapped in single quotes; a value
    # containing a single quote will break the quoting. Consider passing a
    # list with python_shell=False instead — verify against callers first.
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Proxy the command through vCenter; -h selects the target ESXi host.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        Location of the vCenter server or ESX/ESXi host.

    username / password
        Credentials; both mandatory when mechanism is ``userpass``.

    protocol / port
        Connection protocol and TCP port.

    mechanism
        Either ``userpass`` or ``sspi``.

    principal / domain
        Kerberos service principal and user domain; both mandatory when
        mechanism is ``sspi``.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # BUGFIX: ``exc.message`` does not exist on Python 3 — accessing it
        # raised an AttributeError that masked the real error. Stringify the
        # exception instead, which works on both Python 2 and 3.
        exc_message = six.text_type(exc)
        if 'unexpected keyword argument' in exc_message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc_message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
        # BUGFIX: always re-raise. Previously a non-matching TypeError was
        # silently swallowed, leaving ``service_instance`` unbound and
        # failing later with a confusing UnboundLocalError.
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # Certificate verification failed: retry with an unverified
                # SSL context (fall back to the stdlib context on old Pythons).
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: explicit TLSv1 context with verification off.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is closed when the interpreter exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Use a dedicated result variable instead of reusing the parameter name.
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    return
        The matching managed object reference, or None when not found.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # BUGFIX: container views hold server-side resources; the original
        # never destroyed the view, leaking it on every call.
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # pyVim caches the last connection in a process-wide singleton; reuse it
    # when it still points at the requested host:port.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
                (hasattr(stub, 'host') and
                 stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # The cached session expired server-side; reconnect transparently.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path, created from an
    existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub. Default value is None.

    version
        Version of the new stub. Default value is None.
    '''
    # Python 2.7.9+ ships a stricter default SSL context; disable hostname
    # checking and client-side certificate verification for the new stub.
    ssl_context = None
    if sys.version_info[:3] > (2, 7, 8):
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE
    current_stub = service_instance._stub
    hostname = current_stub.host.split(':')[0]
    # Carry the authenticated session over to the new connection.
    session_cookie = current_stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=ssl_context)
    new_stub.cookie = current_stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object to use in log messages. This field is
        optional; pass a falsy value to look it up from the reference.
    '''
    # Only triggers when the caller explicitly passes a falsy name; the
    # default '<unnamed>' is truthy.
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a fresh ServiceInstance that shares the managed object's SOAP
    # stub, and therefore its authenticated session.
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Closes the connection to the vCenter server or ESXi host.

    service_instance
        The Service Instance to disconnect from.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Returns True when the connection is made to a vCenter Server and False
    when the connection is made to an ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    # Guard-clause form instead of if/elif/else chain.
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns the ``about`` information of the vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain the information.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no switch with that name exists
    '''
    # Bail out early when the name is not among the known switches.
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    try:
        for item in container.view:
            if item.name == dvs_name:
                return item
    finally:
        # BUGFIX: container views hold server-side resources; the original
        # never destroyed the view, leaking it on every call.
        container.Destroy()
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for a Kerberos connection.

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Returns the base64-encoded token bytes on success; raises
    CommandExecutionError when no token could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    # NOTE(review): ``in_token`` is never reassigned inside this loop, so the
    # loop either returns or raises on its very first iteration — confirm
    # whether a multi-step GSSAPI negotiation (feeding the server's response
    # back into ctx.step) was intended here.
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            # Python 2 returns str directly; Python 3 needs a bytes coercion
            # before base64-encoding.
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    # Reached when the context is established without ever yielding a token.
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only a direct ESXi host connection ('HostAgent') exposes this inventory;
    # for any other apiType (e.g. vCenter) an empty dict is returned.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        # NOTE(review): this container view is only dereferenced below, never
        # Destroy()'d -- confirm whether that leaks a server-side view object.
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grains report MiB
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Per-vnic IP/MAC details for the interface grains
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is '<host>.<domain>', or just '<host>' when no domain is set
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (the retrieved service content) of a Service
    Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises ``VMwareApiError`` or ``VMwareRuntimeError`` when the vSphere API
    call fails.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    # Standard fault translation used throughout this module: turn vSphere
    # API faults into salt exceptions (NoPermission before the more generic
    # VimFault).
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view we created above; only needed when we built our
    # own traversal spec (local_traversal_spec is True)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose ``property_name`` value
    (or moId) matches ``property_value``, or None when nothing matches.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match against.

    property_name
        An object property used to select the object reference. Defaults to
        ``name``.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, the search defaults to
        the inventory rootFolder.
    '''
    candidates = get_mors_with_properties(service_instance, object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for entry in candidates:
        # Also accept a match on the object's moId (stripped of quotes)
        moid = six.text_type(entry.get('object', '')).strip('\'"')
        if entry[property_name] == property_value or property_value == moid:
            return entry['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Return a list of dicts, each containing the requested properties of a
    managed object plus the reference itself under the ``object`` key.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more
        filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, the search defaults to
        the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''
    retrieve_args = [service_instance, object_type]
    retrieve_kwargs = {'property_list': property_list,
                       'container_ref': container_ref,
                       'traversal_spec': traversal_spec,
                       'local_properties': local_properties}
    # Retry the retrieval once when the connection drops mid-request
    # (BadStatusLine, or an IOError caused by a broken pipe).
    try:
        content = get_content(*retrieve_args, **retrieve_kwargs)
    except BadStatusLine:
        content = get_content(*retrieve_args, **retrieve_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*retrieve_args, **retrieve_kwargs)
    object_list = []
    for obj in content:
        entry = {prop.name: prop.val for prop in obj.propSet}
        entry['object'] = obj.obj
        object_list.append(entry)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal manner (a single local-property collection).

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises ``VMwareApiError`` if the properties could not be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # Fetch the object's name first so error/log messages can identify it;
    # fall back to a placeholder when 'name' is not a valid property.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Return the ``name`` property of a managed object, or None when the
    object has no name.

    mo_ref
        The managed object reference.
    '''
    retrieved = get_properties_of_managed_object(mo_ref, ['name'])
    return retrieved.get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device object for the given
    adapter type name.

    adapter_type
        One of ``vmxnet``, ``vmxnet2``, ``vmxnet3``, ``e1000`` or ``e1000e``.

    Raises ``ValueError`` when the adapter type name is not recognized.
    '''
    if adapter_type == 'vmxnet':
        return vim.vm.device.VirtualVmxnet()
    elif adapter_type == 'vmxnet2':
        return vim.vm.device.VirtualVmxnet2()
    elif adapter_type == 'vmxnet3':
        return vim.vm.device.VirtualVmxnet3()
    elif adapter_type == 'e1000':
        return vim.vm.device.VirtualE1000()
    elif adapter_type == 'e1000e':
        return vim.vm.device.VirtualE1000e()
    # Include the offending value so callers can tell what was passed in
    raise ValueError(
        'An unknown network adapter object type name: \'{0}\'.'.format(
            adapter_type))
def get_network_adapter_object_type(adapter_object):
    '''
    Return the adapter type name for a virtual network adapter device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ``ValueError`` when the object matches no known adapter class.
    '''
    # The checks are ordered exactly as in the original implementation:
    # vmxnet2/vmxnet3 are tested before vmxnet, and e1000e before e1000,
    # so the most specific match wins.
    ordered_checks = [
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    ]
    for adapter_class, type_name in ordered_checks:
        if isinstance(adapter_object, adapter_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Return the distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    entries = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualSwitch,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    selected = []
    for entry in entries:
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            selected.append(entry['object'])
    return selected
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises ``VMwareObjectRetrievalError`` when the folder cannot be found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Follow the datacenter's 'networkFolder' property directly
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits
    for the creation task to complete.

    NOTE(review): despite historical documentation claiming otherwise, this
    function does not return the new DVS reference; callers must look it up
    separately (e.g. via ``get_dvss``).

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the
        name is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and waits for
    the reconfiguration task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (network I/O control) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises ``ArgumentValueError`` when parent_ref is neither a datacenter
    nor a DVS.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    # The traversal differs by parent type:
    #   datacenter -> networkFolder -> childEntity
    #   dvs        -> portgroup
    if isinstance(parent_ref, vim.Datacenter):
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference.

    Raises ``VMwareObjectRetrievalError`` when no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by the system tag
    # 'SYSTEM/DVS.UPLINKPG' on the portgroup object.
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec).
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec).
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message ('portgrouo' -> 'portgroup')
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.

    Raises ``ArgumentValueError`` when parent_ref is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to reach networks
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Each entry returned by get_mors_with_properties is a dict of the
    # requested properties; only the 'name' value is surfaced here.
    return [item['name'] for item in
            get_mors_with_properties(service_instance, vim_object, properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises ``VMwareObjectRetrievalError`` when the manager is not available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license and returns the resulting license info object.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label the vSphere client displays
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ``ArgumentValueError`` when entity_name is missing and
    ``VMwareObjectRetrievalError`` on unexpected assignment results.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # entity_name is guaranteed truthy here (checked above); the vCenter
        # is identified by its instance UUID rather than a moId.
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if entity_type == 'uuid' and len(assignments) > 1:
        # Fixed typo in log message ('Unexpectectedly' -> 'Unexpectedly')
        log.trace('Unexpectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the resulting license info.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        # Consistency fix: log the exception before re-raising, matching
        # every other fault handler in this module.
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Lists all datacenters known to the given service instance.

    service_instance
        The Service Instance Object to query for datacenters.
    '''
    object_type = vim.Datacenter
    return list_objects(service_instance, object_type)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Retrieves vim.Datacenter objects from a vCenter, optionally filtered
    by name.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    matches = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            matches.append(entry['object'])
    return matches
def get_datacenter(service_instance, datacenter_name):
    '''
    Retrieves a single vim.Datacenter managed object by name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name.

    Raises VMwareObjectRetrievalError if no datacenter with that name exists.
    '''
    found = get_datacenters(service_instance,
                            datacenter_names=[datacenter_name])
    if found:
        return found[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name

    Returns the newly created vim.Datacenter object. Raises VMwareApiError
    on permission/API faults and VMwareRuntimeError on runtime faults.
    '''
    # Datacenters are always created directly under the inventory root folder
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved

    Raises VMwareObjectRetrievalError if the cluster is not found in the
    datacenter.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Traverse from the datacenter into its hostFolder (skipping the
    # datacenter object itself), then into the folder's children, where
    # clusters live
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    # Filter the retrieved clusters by exact name match
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.

    Returns nothing; raises VMwareApiError/VMwareRuntimeError on failure.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        # Clusters are created under the datacenter's hostFolder; the
        # returned cluster object is deliberately discarded here
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.

    Blocks until the reconfiguration task completes.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the given spec into the existing configuration
        # instead of replacing it wholesale
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Reconfiguration is asynchronous; wait for the task to finish
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Lists all compute clusters known to the given service instance.

    service_instance
        The Service Instance Object to query for clusters.
    '''
    object_type = vim.ClusterComputeResource
    return list_objects(service_instance, object_type)
def list_datastore_clusters(service_instance):
    '''
    Lists all datastore clusters (storage pods) known to the given service
    instance.

    service_instance
        The Service Instance Object to query for datastore clusters.
    '''
    object_type = vim.StoragePod
    return list_objects(service_instance, object_type)
def list_datastores(service_instance):
    '''
    Lists all datastores known to the given service instance.

    service_instance
        The Service Instance Object to query for datastores.
    '''
    object_type = vim.Datastore
    return list_objects(service_instance, object_type)
def list_datastores_full(service_instance):
    '''
    Returns a mapping from datastore name to its basic information:
    name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {name: list_datastore_full(service_instance, name)
            for name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    # str(...).replace("'", "") strips quote characters from the values as
    # rendered by pyVmomi
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # summary sizes are in bytes; convert to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against ZeroDivisionError for inaccessible/zero-sized datastores
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key renders like 'vim.HostSystem:host-123'; keep the moId part
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the managed object reference, or None if no object matches.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Container views are server-side session objects; destroy the view
        # so it does not accumulate on the vCenter for the session lifetime
        container.Destroy()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object

    Returns the managed object reference, or None if no object matches.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Container views are server-side session objects; destroy the view
        # so it does not accumulate on the vCenter for the session lifetime
        container.Destroy()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            # Call the module-local wait_for_task directly (consistent with
            # the rest of this module) instead of via the dotted self-path
            files.append(wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore without the requested directory is simply skipped
            pass
    return files
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system

    service_instance
        The Service Instance Object used for the property collector query.

    host_ref
        The vim.HostSystem whose storage system is retrieved.

    hostname
        Name of the host, used for logging/errors. Retrieved if not given.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse directly from the host object to configManager.storageSystem
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition informations for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The host's vim.HostStorageSystem.

    device_path
        Path of the device whose partition info is retrieved.
    '''
    try:
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A single device path was queried, so only the first entry is relevant
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The host's vim.HostStorageSystem.

    device_path
        Path of the device on which the partition is added.

    partition_info
        Existing vim.HostDiskPartitionInfo of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use %-style lazy formatting; the previous '{0}' placeholder was never
    # interpolated by the logging module
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved if not provided.
        Default is None.

    Returns the created vim.Datastore reference.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    # Inspect the current partition layout, then compute a spec that adds a
    # vmfs partition covering the remaining free space
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse directly from the host object to configManager.datastoreSystem
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes (unmounts) a datastore.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises VMwareApiError if the datastore has no attached hosts or the
    removal call fails.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # The removal is issued through the datastore system of the first
    # attached host
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            # Keep only hosts whose direct parent is the requested cluster
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    Returns an empty list if the host reports no scsi luns.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # First map scsi addresses to lun keys, then resolve each key to its
    # vim.ScsiLun object
    lun_ids_to_scsi_addr_map = \
        _get_scsi_address_to_lun_key_map(si, host_ref, storage_system,
                                         hostname)
    luns_to_key_map = {d.key: d for d in
                       get_all_luns(host_ref, storage_system, hostname)}
    return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in
            six.iteritems(lun_ids_to_scsi_addr_map)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Nothing to filter by, so nothing can match
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    scsi_luns = get_all_luns(host_ref, storage_system)
    # Keep only HostScsiDisk luns that match by canonical name or by the
    # disk keys derived from the requested scsi addresses
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError if the host has no devices or the
    disk is not found.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the requested disk among the host's scsi luns by canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk

    in a vcenter filtered by their names and/or datacenter, cluster membership

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    # Retrieve the host's scsi luns through the property collector, starting
    # from the host and traversing to its configManager.storageSystem
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # No cache disk ids to filter by, so nothing can match
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (single) cache disk's canonical name
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that a disk group contains exactly the expected cache disk and
    capacity disks; raises ArgumentValueError on any mismatch and returns
    True when the check passes.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids,
                      expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, otherwise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # No manager supplied: traverse from the host to its cache
        # configuration manager and fetch the property in one query
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first cache configuration entry is returned
        # (host caches on multiple datastores are not supported, see TODO)
        return results[0]['cacheConfigurationInfo'][0]
    else:
        # Manager supplied: read the property directly off it
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in mebibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        # Look up the cache configuration manager through the host's
        # configManager when one was not supplied by the caller
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    # pyVmomi faults are translated into salt exceptions so callers only
    # have to handle one exception family
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    # Thin wrapper around list_objects for the vim.HostSystem type
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        List of resource pool names to retrieve; may be None/empty when
        ``get_all_resource_pools`` is True

    datacenter_name
        Name of the datacenter where the resource pool is available.
        Default is None, in which case the search starts at the root folder.

    get_all_resource_pools
        Boolean; when True all resource pools under the container are returned

    return
        List of vim.ResourcePool managed object references

    raise
        VMwareObjectRetrievalError when no matching resource pool is found
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Scope the search to the datacenter when one was given, otherwise
    # search the whole inventory from the root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = [pool['object'] for pool in resource_pools
                      if get_all_resource_pools or
                      pool['name'] in resource_pool_names]

    if not selected_pools:
        # Report the *requested* names; the original message formatted
        # selected_pools, which is always empty at this point
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    # Thin wrapper around list_objects for the vim.ResourcePool type
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    # Thin wrapper around list_objects for the vim.Network type
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    # Thin wrapper around list_objects for the vim.VirtualMachine type
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    # Thin wrapper around list_objects for the vim.Folder type
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    # Thin wrapper around list_objects for the vim.DistributedVirtualSwitch type
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    # Thin wrapper around list_objects for the vim.VirtualApp type
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    # Thin wrapper around list_objects for the distributed virtual
    # portgroup type
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    Translates pyVmomi faults raised while polling (and the task's own error
    on failure) into salt exceptions, so callers only need to handle the
    salt exception family.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

        NOTE(review): the loop below sleeps roughly one second per iteration
        regardless of this value; ``sleep_seconds`` only controls how often
        the waiting message is logged — confirm this is intended.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Reading task.info can itself fault; translate those faults eagerly
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll until the task leaves the 'running'/'queued' states
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep to the next whole-second boundary relative to start_time
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the server-reported error so it
        # can be translated into the matching salt exception below
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                # Append the detailed fault message when the server provides one
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name; used as the search container when ``parent_ref``
        is not given

    vm_properties
        List of vm properties. When None, a default set covering hardware,
        storage, guest and runtime information is used.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    raise
        VMwareObjectRetrievalError when no VM matches; VMwareMultipleObjectsError
        when more than one VM matches
    '''
    if datacenter and not parent_ref:
        # Use local helpers directly instead of the salt.utils.vmware self
        # reference, consistent with the rest of this module
        parent_ref = get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = get_mors_with_properties(service_instance,
                                       vim.VirtualMachine,
                                       vm_properties,
                                       container_ref=parent_ref,
                                       traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the'
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary; the 'folder' key is honored when present

    base_vm_name
        Existing virtual machine name (for cloning); its parent folder is
        returned when given

    raise
        VMwareObjectRetrievalError / VMwareMultipleObjectsError on lookup
        problems; ArgumentValueError when no folder source was specified
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Cloning: reuse the parent folder of the base virtual machine
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = get_datacenter(service_instance, datacenter)
        dc_props = get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this fell through and raised an UnboundLocalError on
        # the return statement; fail with an explicit error instead
        raise salt.exceptions.ArgumentValueError(
            'Unable to retrieve folder: a base vm name, a placement folder '
            'or a datacenter must be specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if placement is None:
        # Avoid a TypeError on the membership tests below; the explicit
        # 'Placement is not defined.' error is raised instead
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                    get_properties_of_managed_object(host_objects[0],
                                                     properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose resourcePool directly; for clustered
            # hosts traverse to the cluster's resource pool instead
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # The original message indexed placement['host'], which is not
            # present in this branch and raised a KeyError
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to kilobytes, based on the unit.

    unit
        Unit of the size: 'GB', 'MB' or 'KB' (case insensitive);
        Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    return
        Dictionary with the converted integer size under 'size' and the
        literal string 'KB' under 'unit'. (The previous docstring
        incorrectly claimed a bare long integer was returned.)

    raise
        ArgumentValueError when the unit is not one of the supported values
    '''
    # vCenter needs long value
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    normalized_unit = unit.lower()
    if normalized_unit not in multipliers:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': int(size * multipliers[normalized_unit]), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine; one of 'on' or 'off'

    return
        The virtual machine object that was acted on

    raise
        ArgumentValueError for an unsupported action; VMwareApiError /
        VMwareRuntimeError / VMwarePowerOnError on vSphere failures
    '''
    # Select the power operation up front so the (previously duplicated)
    # fault-translation block only has to be written once
    if action == 'on':
        power_fn = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_fn = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_fn()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    try:
        # Only pass the host when a valid vim.HostSystem was supplied
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)
    '''
    try:
        # Register through the datacenter's vm folder; asTemplate=False
        # registers a regular (non-template) virtual machine
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Reconfigures a virtual machine with the given config spec and waits for
    the reconfiguration task to complete.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(reconfig_task, name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys a virtual machine (its files are removed from the datastore).

    vm_ref
        Managed object reference of the virtual machine to destroy
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters a virtual machine from the inventory. Unlike ``delete_vm``,
    the virtual machine's files are left on the datastore. (The previous
    docstring and log message incorrectly said the vm was being destroyed.)

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, consistent with the sibling functions
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
rename_datastore
|
python
|
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore to the given name.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    current_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", current_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
Renames a datastore
datastore_ref
vim.Datastore reference to the datastore object to be changed
new_datastore_name
New datastore name
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2214-L2238
|
[
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if PyVmomi is installed.
'''
if HAS_PYVMOMI:
return True
return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    # NOTE(review): the password is interpolated into the command line below,
    # so it may be visible in the process list while the command runs
    # (output_loglevel='quiet' only keeps it out of the salt logs).
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Tries a normal SmartConnect first, then falls back to progressively
    looser SSL configurations when certificate verification fails.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Python 3 exceptions have no '.message' attribute; use
        # six.text_type(exc) so this handler does not itself raise
        # an AttributeError
        if 'unexpected keyword argument' in six.text_type(exc):
            log.error('Initial connect to the VMware endpoint failed with %s',
                      six.text_type(exc))
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
        # Always re-raise: previously a TypeError without the expected text
        # was silently swallowed, leaving service_instance unbound and
        # failing later with an UnboundLocalError
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)

        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # First fallback: retry with an unverified SSL context
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Second fallback: force TLSv1 with verification disabled
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:  # pylint: disable=broad-except
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the connection is closed when the interpreter exits
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Bind the result to its own name instead of shadowing the parameter
    spec_manager = si.content.customizationSpecManager
    customization_spec = spec_manager.GetCustomizationSpec(name=customization_spec_name)
    return customization_spec
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the matching managed object reference, or None if no object
    with the given name exists.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # doesn't accumulate on the vCenter/ESXi host (the original code
        # leaked one view per call).
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``

    Raises salt.exceptions.VMwareApiError / VMwareRuntimeError when the
    authenticated-session check against the server fails.
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # pyVmomi caches the last connected service instance process-wide;
    # reuse it only when it still points at the same host:port.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
                (hasattr(stub, 'host') and
                 stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # The cached session expired server-side: drop it and log in again.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    # Reuse the already-authenticated session: pull the session cookie out
    # of the existing stub and advertise it through the vmomi request
    # context so the new endpoint accepts the same vCenter session.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    # Carry over the raw cookie header as well so requests authenticate.
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a fresh ServiceInstance and reuse the managed object's SOAP
    # stub so both talk over the same connection/session.
    service_instance = vim.ServiceInstance('ServiceInstance')
    service_instance._stub = mo_ref._stub
    return service_instance
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    # Map pyVmomi faults onto salt exceptions; clauses are ordered from
    # most specific to most general.
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' means vCenter; 'HostAgent' means a standalone ESXi.
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    # The 'about' structure carries product name/version/apiType, etc.
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no switch matches
    '''
    # Guard clause: bail out early when the name isn't a known switch.
    if dvs_name not in list_dvs(service_instance):
        return None
    content = get_inventory(service_instance)
    container = content.viewManager.CreateContainerView(content.rootFolder, [vim.DistributedVirtualSwitch], True)
    for dvs in container.view:
        if dvs.name == dvs_name:
            return dvs
    return None
def _get_pnics(host_reference):
    '''
    Helper function that returns a list of PhysicalNics and their information.
    '''
    # Physical NICs live under the host's network configuration.
    network_config = host_reference.config.network
    return network_config.pnic
def _get_vnics(host_reference):
    '''
    Helper function that returns a list of VirtualNics and their information.
    '''
    # Virtual NICs live under the host's network configuration.
    network_config = host_reference.config.network
    return network_config.vnic
def _get_vnic_manager(host_reference):
    '''
    Helper function that returns a list of Virtual NicManagers
    and their information.
    '''
    # The virtual NIC manager is exposed through the host's configManager.
    return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None when no name matches
    '''
    # Linear scan over the switch's portgroups; first match wins.
    return next((pg for pg in dvs.portgroup if pg.name == portgroup_name),
                None)
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return the uplink portgroup with the given name on the dvs.

    Same lookup as _get_dvs_portgroup: uplink portgroups appear in the
    switch's ``portgroup`` list, so a name scan finds them as well.

    :param dvs: DVS object
    :param portgroup_name: Name of the uplink portgroup to return
    :return: Portgroup object, or None when no name matches
    '''
    return next((pg for pg in dvs.portgroup if pg.name == portgroup_name),
                None)
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Returns the base64-encoded initial GSSAPI token, or raises
    CommandExecutionError when no token could be produced.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # NOTE(review): in_token is never updated from a server response
        # inside this loop, so it effectively performs a single step and
        # either returns the initial token or raises — confirm intent.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only standalone ESXi hosts ('HostAgent') expose per-host hardware;
    # a vCenter connection returns an empty grain dict.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the (single) HostSystem of this ESXi host.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                # The serial number is tagged as 'ServiceTag' in the
                # identifying info list.
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grains report MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Collect per-vnic IPv4/IPv6 addresses and MACs.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn = host[.domain]; the dot is omitted when no domain is set.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the local reference to the container view.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    # The full content tree (rootFolder, viewManager, ...) is obtained via
    # RetrieveContent().
    return service_instance.RetrieveContent()
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    # Translate pyVmomi faults into salt exceptions, most specific first.
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view
    # (only if we created a temporary container view above; a caller-supplied
    # container_ref/traversal_spec is left untouched)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Retrieve all candidates carrying the requested property.
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for entry in candidates:
        # Also accept a match on the stringified MOR id of the object.
        mor_id = six.text_type(entry.get('object', '')).strip('\'"')
        if entry[property_name] == property_value or property_value == mor_id:
            return entry['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Fetch the raw property-collector results, retrying once on the
    # transient connection failures pyVmomi surfaces (BadStatusLine, EPIPE).
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)
    # Flatten each result into a {property_name: value} dict, keeping the
    # managed object reference itself under the 'object' key.
    object_list = []
    for obj in content:
        properties = {prop.name: prop.val for prop in obj.propSet}
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved optimally
    through the property collector.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    # First try to resolve the object's name, purely for log/error messages.
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    try:
        name_props = get_mors_with_properties(service_instance,
                                              type(mo_ref),
                                              container_ref=mo_ref,
                                              property_list=['name'],
                                              local_properties=True)
        mo_name = name_props[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    results = get_mors_with_properties(service_instance,
                                       type(mo_ref),
                                       container_ref=mo_ref,
                                       property_list=properties,
                                       local_properties=True)
    if not results:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return results[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.
    If the name wasn't found, it returns None.

    mo_ref
        The managed object reference.
    '''
    # Delegate to the optimized property retrieval and pull out 'name'.
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new network adapter device object for the given type name.

    adapter_type
        The adapter type name from which to obtain the network adapter type.
        One of ``vmxnet``, ``vmxnet2``, ``vmxnet3``, ``e1000``, ``e1000e``.

    Raises ValueError for unknown adapter type names.
    '''
    # Dispatch table keeps the name -> device class mapping in one place
    # (mirrors get_network_adapter_object_type below).
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name for a device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.
    '''
    # Order matters: the more specific vmxnet2/vmxnet3 checks must run
    # before the vmxnet check, and e1000e before e1000 (presumably because
    # of the pyVmomi class hierarchy — keep this ordering).
    ordered_checks = [
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    ]
    for device_class, type_name in ordered_checks:
        if isinstance(adapter_object, device_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # DVSs hang off the datacenter's network folder, so traverse
    # datacenter -> networkFolder -> childEntity.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvs_refs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        # Keep everything when get_all_dvss is set, otherwise filter by name.
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvs_refs.append(entry['object'])
    return dvs_refs
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Follow the datacenter's 'networkFolder' property directly.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if folders:
        return folders[0]['object']
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Network folder in datacenter \'{0}\' wasn\'t retrieved'
        ''.format(dc_name))
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switches (DVS) in a datacenter.
    Returns the reference to the newly created distributed virtual switch.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Fill in a minimal create spec when the caller didn't supply one.
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    network_folder = get_network_folder(dc_ref)
    try:
        task = network_folder.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Creation is asynchronous; block until the task completes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Reconfiguration is asynchronous; block until the task completes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    # This call is synchronous — no task to wait on.
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Datacenter parent: walk datacenter -> networkFolder -> childEntity.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # DVS parent: portgroups are reachable directly via 'portgroup'.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or (portgroup_names and
                                  entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        # The uplink portgroup carries the SYSTEM/DVS.UPLINKPG tag.
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            return entry['object']
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs).

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Creation is asynchronous; block until the task completes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the trace message ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Reconfiguration is asynchronous; block until the task completes.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Destruction is asynchronous; block until the task completes.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    # Only datacenters are supported as parents of standard switch networks.
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to reach networks.
    net_traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=net_traversal_spec):
        if get_all_networks or (network_names and
                                entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    # Docstring previously documented a nonexistent ``object_type`` parameter;
    # the actual parameter is ``vim_object``.
    if properties is None:
        properties = ['name']
    # Only the 'name' property of each retrieved managed object is returned.
    return [item['name'] for item in
            get_mors_with_properties(service_instance, vim_object, properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Defensive: older endpoints may not expose an assignment manager.
    if not assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        licenses = license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return licenses
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label displayed by the vSphere client.
    license_label = vim.KeyValue()
    license_label.key = 'VpxClientLicenseLabel'
    license_label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        added_license = license_manager.AddLicense(key, [license_label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return added_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.
    service_instance
        The Service Instance Object from which to obtain the licenses.
    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.
    entity_name
        Entity name used in logging.
        Default is None.
    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        # The vCenter itself is identified by its instance UUID rather than
        # by a managed object id.
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        # Non-vCenter entities (e.g. host, vsan cluster) use the moid.
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid) query is expected to return exactly one assignment.
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        # Sanity check that the assignment really belongs to the vCenter
        # named by the caller.
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before translating the fault, consistent with every other
            # API error handler in this module.
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns the names of the datacenters associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        datacenter = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return datacenter
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref,
                                                                name=dc_name)
    # Traverse datacenter -> hostFolder -> childEntity to reach clusters.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    matches = [entry['object'] for entry in
               get_mors_with_properties(service_instance,
                                        vim.ClusterComputeResource,
                                        container_ref=dc_ref,
                                        property_list=['name'],
                                        traversal_spec=traversal_spec)
               if entry['name'] == cluster]
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    datacenter_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, datacenter_name)
    try:
        # Clusters live under the datacenter's host folder.
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Reconfigures an existing cluster and waits for the update task to
    complete.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True applies the spec incrementally instead of replacing
        # the entire configuration.
        update_task = cluster_ref.ReconfigureComputeResource_Task(
            cluster_spec, modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(update_task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns the names of the clusters associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns the names of the datastore clusters (storage pods) associated
    with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns the names of the datastores associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dictionary of datastores associated with a given service
    instance, mapping each datastore name to its basic information:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {name: list_datastore_full(service_instance, name)
            for name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    summary = datastore_object.summary
    items = {}
    items['name'] = str(summary.name).replace("'", "")
    items['type'] = str(summary.type).replace("'", "")
    items['url'] = str(summary.url).replace("'", "")
    # Sizes are reported by the API in bytes; convert to MiB.
    items['capacity'] = summary.capacity / 1024 / 1024
    items['free'] = summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against ZeroDivisionError for datastores reporting no capacity
    # (e.g. inaccessible/unmounted datastores).
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key renders like 'vim.HostSystem:host-123'; keep only the moid.
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # doesn't accumulate on the vCenter/ESXi side.
        container.DestroyView()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # doesn't accumulate on the vCenter/ESXi side.
        container.DestroyView()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    results = []
    for ds_obj in get_datastores(service_instance, container_object,
                                 datastore_names=datastores):
        try:
            search_task = ds_obj.browser.SearchDatastore_Task(
                datastorePath='[{}] {}'.format(ds_obj.name, directory),
                searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # A missing directory on one datastore is not fatal; just skip it.
        try:
            results.append(salt.utils.vmware.wait_for_task(
                search_task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            pass
    return results
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses
    service_instance
        The Service Instance Object from which to obtain datastores.
    reference
        The VMware object from which the datastores are visible.
    datastore_names
        The list of datastore names to be retrieved. Default value is None.
    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None
    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        # Disk-based filtering is only meaningful on ESXi hosts.
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        # Translate the backing disk ids into datastore names by inspecting
        # the host's mounted VMFS volumes and their extents.
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Merge the disk-derived names into the explicit name filter.
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    # Each reference type needs a different property-collector traversal to
    # reach its datastore list.
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system.

    service_instance
        Service instance the host is connected to.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host; looked up from the reference when not provided.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse directly to the host's configManager.storageSystem property.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return entries[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo
    '''
    try:
        infos = storage_system.RetrieveDiskPartitionInfo(
            devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A single device path was passed in, so only one entry is returned.
    log.trace('partition_info = %s', infos[0])
    return infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The host's storage system (vim.HostStorageSystem).

    device_path
        Path of the disk device being partitioned.

    partition_info
        Current partition layout (vim.HostDiskPartitionInfo) of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use %s lazy formatting; the previous '{0}' placeholder was never
    # substituted by the logging module (which uses printf-style args).
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's storage system; retrieved if not provided.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    # Compute a partition spec that uses up the remainder of the disk.
    partition_info = _get_partition_info(storage_system, disk_ref.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system, disk_ref.devicePath, partition_info)
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=disk_ref.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system.

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse directly to the host's configManager.datastoreSystem property.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return entries[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The removal is performed through the datastore
    system of the first host attached to the datastore.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    # Docstring previously said 'Creates a VMFS datastore from a disk_id',
    # copy-pasted from create_vmfs_datastore; this function removes one.
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    # Removal is done through a host's datastore system, so at least one
    # attached host is required.
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # 'parent' is needed below to check cluster membership; cluster
            # filtering only makes sense when a datacenter was specified
            properties.append('parent')
    else:
        # No datacenter given: search from the inventory root folder
        start_point = get_root_folder(service_instance)
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for host_props in hosts:
        if cluster_name:
            parent = host_props['parent']
            # Skip hosts that are not members of the requested cluster
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host_props['name'] in host_names:
            filtered_hosts.append(host_props['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an
    ESXi host, built from the host's multipath info.

    map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None; retrieved if not given.

    hostname
        Name of the host. Default is None; retrieved if not given.

    Raises ``VMwareObjectRetrievalError`` when the device, multipath or lun
    information is missing, ``VMwareApiError``/``VMwareRuntimeError`` on
    vCenter faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiLun objects on a host; returns an
    empty list when the host reports no scsi luns.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None; retrieved if not given.

    hostname
        Name of the host. This argument is optional.

    Raises ``VMwareObjectRetrievalError`` when the storage system or device
    info cannot be retrieved, ``VMwareApiError``/``VMwareRuntimeError`` on
    vCenter faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Builds a map of all vim.ScsiLun objects on an ESXi host keyed by their
    scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = storage_system or get_storage_system(si, host_ref,
                                                          hostname)
    # scsi address -> lun key, from the host's multipath info
    key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                        storage_system,
                                                        hostname)
    # lun key -> lun object, from the host's full lun list
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Join the two maps on the lun key
    scsi_addr_to_lun = {}
    for scsi_addr, lun_key in six.iteritems(key_by_scsi_addr):
        scsi_addr_to_lun[scsi_addr] = lun_by_key[lun_key]
    return scsi_addr_to_lun
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses.
    Returns an empty list if neither a filter nor ``get_all_disks`` is given.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default
        value is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default
        value is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    # A disk is kept if it is a HostScsiDisk and matches either filter
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns the partition information (a HostDiskPartitionInfo object as
    returned by the sibling ``_get_partition_info`` helper) for a disk.

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None; retrieved if not
        given.

    Raises ``VMwareObjectRetrievalError`` if no devices were retrieved or
    the disk is not found on the host.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Keep only the scsi disk with the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.

    Raises ``VMwareObjectRetrievalError`` if the disk or device list cannot
    be retrieved, ``VMwareApiError``/``VMwareRuntimeError`` on vCenter
    faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Keep only the scsi disk with the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.
    Returns an empty list if neither ``cache_disk_ids`` nor
    ``get_all_disk_groups`` is given, or if the host has no disk mappings.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.

    Raises ``VMwareObjectRetrievalError`` when the vsan host config or its
    storage info is missing, ``VMwareApiError``/``VMwareRuntimeError`` on
    vCenter faults.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (single) cache disk's canonical name
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that a disk group contains the expected cache disk and
    capacity disks. Raises ``ArgumentValueError`` when the check fails,
    returns True otherwise.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # Compare the capacity disk sets order-independently
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids,
                      expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the host cache configuration info (first entry of
    ``cacheConfigurationInfo``) if the host cache is configured on the
    specified host, otherwise returns None.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Retrieve the cache manager and its config info in one query
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host. Returns True on
    success.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore opject representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Return every vim.HostSystem object visible to the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    # Thin wrapper around the generic inventory lister
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; when True, every resource pool in the container is
        returned regardless of ``resource_pool_names``

    return
        List of vim.ResourcePool managed object references

    Raises ``VMwareObjectRetrievalError`` when no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the requested names; the previous code formatted the
        # (always empty) result list here, making the error useless
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Return every vim.ResourcePool object visible to the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    # Thin wrapper around the generic inventory lister
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Return every vim.Network object visible to the given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    # Thin wrapper around the generic inventory lister
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Return every vim.VirtualMachine object visible to the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    # Thin wrapper around the generic inventory lister
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Return every vim.Folder object visible to the given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    # Thin wrapper around the generic inventory lister
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Return every vim.DistributedVirtualSwitch object visible to the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    # Thin wrapper around the generic inventory lister
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Return every vim.VirtualApp object visible to the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    # Thin wrapper around the generic inventory lister
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Return every vim.dvs.DistributedVirtualPortgroup object visible to the
    given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    # Thin wrapper around the generic inventory lister
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed. Returns the task result on success;
    on failure re-raises the task's error translated into a Salt exception.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll the task until it leaves the 'running'/'queued' states
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only emit a progress message every sleep_seconds iterations
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep to the next whole-second boundary so time_counter tracks
        # elapsed wall-clock seconds
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise its error so it can be
        # translated into the corresponding Salt exception
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns a dictionary of the matching VM's properties (including the
    managed object under the 'object' key).

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises ``VMwareObjectRetrievalError`` if no VM matches,
    ``VMwareMultipleObjectsError`` if more than one matches.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set covering hardware, storage and runtime info
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the'
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object: the base VM's parent folder when
    ``base_vm_name`` is given, otherwise ``placement['folder']``, otherwise
    the datacenter's vmFolder.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    NOTE(review): if no base_vm_name is given, 'folder' is not in placement
    and datacenter is falsy, ``folder_object`` is never assigned and the
    final return raises a NameError — confirm callers always provide one of
    the three.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Cloning: reuse the parent folder of the base VM
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default VM folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object to access the vCenter

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info: cluster, host or resourcepool
        name

    return
        Tuple of (resource pool object, placement object); the placement
        object is the matched host or cluster, if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if not placement:
        # Guard: previously a None placement crashed with a TypeError on
        # the 'in' test below; raise the schema error explicitly instead.
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The host doesn't expose 'resourcePool' directly; walk up to
            # the parent compute resource and fetch its pool instead
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: this branch previously formatted placement['host'],
            # which raised a KeyError when only 'resourcepool' was given
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit.

    unit
        Unit of the size: 'GB', 'MB' or 'KB' (case insensitive).
        Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    return
        Dictionary with the integer size in KB under 'size' and the
        literal 'KB' under 'unit' (the old docstring incorrectly claimed a
        long integer was returned)

    Raises ``ArgumentValueError`` if the unit is not one of the above.
    '''
    normalized_unit = unit.lower()
    if normalized_unit == 'gb':
        # vCenter needs an integral value (1 GB == 1024 * 1024 KB)
        target_size = int(size * 1024 * 1024)
    elif normalized_unit == 'mb':
        target_size = int(size * 1024)
    elif normalized_unit == 'kb':
        target_size = int(size)
    else:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': target_size, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine and waits for the power task to
    complete; returns the virtual machine object.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine: 'on' or 'off'

    Raises ``ArgumentValueError`` for any other action,
    ``VMwarePowerOnError`` if the power task fails on a missing file, and
    ``VMwareApiError``/``VMwareRuntimeError`` on vCenter faults.
    '''
    if action == 'on':
        try:
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    elif action == 'off':
        try:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # Translate a missing-file failure into a power operation error
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from a config spec and waits for the creation
    task to complete.

    vm_name
        Name of the virtual machine to be created (used for task logging)

    vm_config_spec
        Virtual Machine Config Spec object (vim.vm.ConfigSpec)

    folder_object
        vm Folder managed object reference under which the VM is created

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional); when
        omitted, placement is left to vCenter

    return
        Virtual Machine managed object reference

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    try:
        # Only pass ``host`` when a genuine HostSystem was supplied.
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Wait for task completion; the task result is the new VM reference.
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on
    success it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool
        object

    host_object
        Placement host of the virtual machine, vim.HostSystem object
        (optional)

    Raises VMwareApiError on permission/API faults, VMwareRuntimeError on
    vmodl runtime faults and VMwareVmRegisterError if the vmx file could
    not be found.
    '''
    try:
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        # Wait for registration; a missing vmx file surfaces as a
        # FileNotFound error from the task.
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Reconfigures a virtual machine with the given config spec and waits
    for the reconfiguration task to complete.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to apply

    Returns the task result (the VM reference) on success.
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return wait_for_task(reconfig_task, name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine, deleting it from the inventory and
    removing its files from the datastore (contrast with ``unregister_vm``).

    vm_ref
        Managed object reference of a virtual machine object

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the destroy task completes.
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters a virtual machine from the inventory. The VM's files are
    left on the datastore (contrast with ``delete_vm``, which destroys
    them).

    vm_ref
        Managed object reference of a virtual machine object

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed log message: this function unregisters, it does not destroy.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # log.exception added for consistency with the other API wrappers
        # in this module (it was missing here).
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
_get_partition_info
|
python
|
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo.

    storage_system
        Host storage system managed object (vim.host.StorageSystem).

    device_path
        Disk device path to inspect.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    try:
        # RetrieveDiskPartitionInfo takes a list of paths and returns a
        # matching list of results; only the single requested path is used.
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
|
Returns partition informations for a device path, of type
vim.HostDiskPartitionInfo
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2266-L2287
| null |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load this util module when the pyVmomi dependency is available.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli commmand, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port (defaults to 443)
    :param protocol: Connection protocol (defaults to ``https``)
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: Dictionary with the ``cmd.run_all`` result
             (retcode/stdout/stderr), or ``False`` if the ``esxcli``
             binary is not installed

    NOTE(security): the password is interpolated into the command line, so
    it is visible in the process list on the minion while esxcli runs.
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting via a vCenter; use -h to address the target ESXi host.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    # output_loglevel='quiet' keeps the password-bearing command line out of
    # the minion log.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        Location of the vCenter server or ESX/ESXi host.

    username / password
        Credentials; both mandatory when ``mechanism`` is ``userpass``.

    protocol / port
        Connection protocol and TCP port.

    mechanism
        Either ``userpass`` or ``sspi``.

    principal / domain
        Kerberos service principal and user domain; both mandatory when
        ``mechanism`` is ``sspi``.

    Raises CommandExecutionError for invalid/missing arguments and
    VMwareConnectionError when a connection cannot be established.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Older pyVmomi releases do not accept the b64token/mechanism
        # keyword arguments; surface an actionable message and re-raise.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First fallback: retry with certificate verification disabled
            # when the failure was an SSL certificate verification error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                    '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
                    '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Second fallback: force TLSv1 with verification disabled.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is cleanly closed on interpreter exit.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Look up a VMware customization spec by name, for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see
        get_service_instance)

    customization_spec_name
        Name of the customization spec to retrieve
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.

    si
        ServiceInstance for the vSphere or ESXi server (see
        get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the matching managed object reference, or None if not found.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Destroy the server-side container view so it is not leaked for
        # the lifetime of the session (matches get_content's behavior).
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the
    service instance object. A cached session is reused when possible.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: tear it down and establish a fresh one.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a SOAP stub that points to a different endpoint path, created
    from an existing connection (the session cookie is reused, so no new
    login occurs).

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    # Extract the session cookie value and propagate it via the request
    # context so the new stub authenticates as the existing session.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object by reusing the
    managed object's SOAP stub.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only for logging. This field is
        optional.
    '''
    # NOTE(review): with the truthy default '<unnamed>' this branch only
    # fires when a caller explicitly passes a falsy name.
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Closes the connection to a vCenter server or ESXi host.

    service_instance
        The Service Instance whose session should be terminated.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on vmodl runtime faults.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Returns True if the connection is made to a vCenter Server and False if
    the connection is made to an ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises VMwareApiError for an unexpected apiType and on API faults.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns the About information (vim.AboutInfo) of the vCenter or ESXi
    host.

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None if no DVS with that name exists
    '''
    switches = list_dvs(service_instance)
    if dvs_name not in switches:
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    try:
        for item in container.view:
            if item.name == dvs_name:
                return item
        return None
    finally:
        # Destroy the server-side container view so it is not leaked for
        # the lifetime of the session (matches get_content's behavior).
        container.Destroy()
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection, base64-encoded.

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Raises ImportError when gssapi is unavailable and
    CommandExecutionError when no token could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # Single-step exchange: the first out_token is returned immediately.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): in_token is never reassigned in this loop, so when
        # no out_token is produced this raises on the first iteration.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance
    is a HostAgent type (i.e. a direct ESXi connection); otherwise an empty
    dict is returned.

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the (single) ESXi host behind this HostAgent.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is reported in bytes; convert to MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Collect per-vmkernel-NIC addressing information.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host + '.' + domain, omitting the dot when no domain.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (service content) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter (or ESXi host).

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service
    Instance, using the PropertyCollector API.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties used to return even more
        filtered content results. When omitted, all properties are returned.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting point for the
    # filter is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view (only if this function created it)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match against (or the stringified moref).

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Fetch all managed objects of the requested type together with the one
    # property that is matched against.
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # The stringified moref (e.g. 'vim.HostSystem:host-12') with its
        # surrounding quotes stripped is also accepted as a match.
        moid_str = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == moid_str:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    kwargs = {'property_list': property_list,
              'container_ref': container_ref,
              'traversal_spec': traversal_spec,
              'local_properties': local_properties}
    # Transient connection failures (stale keep-alive or broken pipe) are
    # retried exactly once by re-issuing the identical query.
    try:
        content = get_content(service_instance, object_type, **kwargs)
    except BadStatusLine:
        content = get_content(service_instance, object_type, **kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(service_instance, object_type, **kwargs)
    results = []
    for entry in content:
        # Flatten the propSet into a plain dict; the managed object itself
        # is exposed under the 'object' key.
        props = {prop.name: prop.val for prop in entry.propSet}
        props['object'] = entry.obj
        results.append(props)
    log.trace('Retrieved %s objects', len(results))
    return results
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way (a single local-property query against the object itself).

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # Fetch the object's name first so error messages can identify it; fall
    # back to a placeholder when the object type has no 'name' property.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    # Only mo_ref itself was queried, so the first (and only) entry holds
    # the requested properties.
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object, or None when the object has no
    'name' property.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device object for the given
    adapter type name.

    adapter_type
        The adapter type name from which to obtain the network adapter
        device. One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e'.

    Raises ValueError for an unknown adapter type name.
    '''
    # Map each supported type name to its device class; instantiation is
    # deferred so only the requested adapter object is ever created.
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type not in adapter_classes:
        raise ValueError('An unknown network adapter object type name.')
    return adapter_classes[adapter_type]()
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name of an adapter device object.

    adapter_object
        The adapter device object from which to obtain the type name.

    Raises ValueError for an unrecognized adapter object.
    '''
    # NOTE: order matters — the more specific device classes are tested
    # before their more generic relatives, exactly as in the original
    # isinstance chain.
    ordered_type_map = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for device_cls, type_name in ordered_type_map:
        if isinstance(adapter_object, device_cls):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # DVSs live under the datacenter's network folder, so traverse
    # networkFolder -> childEntity instead of the default 'Traverse All'.
    folder_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference whose network folder is returned.

    Raises VMwareObjectRetrievalError when the folder cannot be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Traverse only the datacenter's 'networkFolder' property instead of
    # the default 'Traverse All' spec.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits
    for the creation task to complete.

    NOTE: the reference to the newly created DVS is not returned; callers
    must retrieve it separately (e.g. via get_dvss).

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a spec carrying only the name is
        built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    # Ensure the config spec exists and carries the requested switch name.
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    # Translate pyVmomi faults into Salt exception types.
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and waits for
    the reconfigure task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    # Translate pyVmomi faults into Salt exception types.
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (network I/O control) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    # EnableNetworkResourceManagement is synchronous; no task to wait on.
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Portgroups hang off the datacenter's network folder.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # The parent is a distributed virtual switch; use its 'portgroup'
        # property directly.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference

    Raises VMwareObjectRetrievalError when no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by the system tag attached to it.
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    # Translate pyVmomi faults into Salt exception types.
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the reconfigure
    task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Updating portgroup %s', pg_name)
    # Translate pyVmomi faults into Salt exception types.
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    # Translate pyVmomi faults into Salt exception types.
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The names of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicating whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks are children of the datacenter's network folder.
    folder_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_spec])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Only each object's 'name' is returned, regardless of which extra
    # properties were requested.
    return [item['name'] for item in
            get_mors_with_properties(service_instance, vim_object, properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    # Translate pyVmomi faults into Salt exception types so callers don't
    # need to depend on the vim/vmodl hierarchy.
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises VMwareObjectRetrievalError when the assignment manager is not
    available on the instance.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    # Translate pyVmomi faults into Salt exception types.
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # 'VpxClientLicenseLabel' is the label key used to carry the
    # human-readable description shown by the vSphere client.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # vCenter itself: assignments are queried by instance UUID instead
        # of a managed object id.
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter query (by uuid) must yield exactly one assignment.
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    # Sanity check: the display name of the assignment must match the
    # vCenter we asked about.
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter: licenses are assigned against the vCenter instance UUID
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before translating, consistent with the other API
            # wrappers in this module.
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    # Thin wrapper: list_objects returns only each object's 'name'.
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns datacenter managed objects in a vCenter.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    entries = get_mors_with_properties(service_instance,
                                       vim.Datacenter,
                                       property_list=['name'])
    datacenters = []
    for entry in entries:
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object matching the given name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name

    Raises VMwareObjectRetrievalError when no such datacenter exists.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder and returns the
    new vim.Datacenter object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    # Translate pyVmomi faults into Salt exception types.
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The name of the cluster to be retrieved

    Raises VMwareObjectRetrievalError when the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's host folder; traverse
    # hostFolder -> childEntity instead of the default 'Traverse All'.
    folder_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_spec])
    matches = [entry['object'] for entry in
               get_mors_with_properties(si,
                                        vim.ClusterComputeResource,
                                        container_ref=dc_ref,
                                        property_list=['name'],
                                        traversal_spec=traversal_spec)
               if entry['name'] == cluster]
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    NOTE: the created cluster reference is not returned; callers must
    retrieve it separately (e.g. via get_cluster).

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx). Required.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    # CreateClusterEx is synchronous; no task to wait on.
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter and waits for the reconfigure task
    to complete.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx). Required.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    # modify=True applies the spec incrementally on top of the current
    # cluster configuration.
    try:
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    # Thin wrapper: list_objects returns only each object's 'name'.
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster (StoragePod) names associated with
    a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    # Thin wrapper: list_objects returns only each object's 'name'.
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastore names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # Thin wrapper: list_objects returns only each object's 'name'.
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dictionary of datastores associated with a given service
    instance, mapping each datastore name to its basic information:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return dict(
        (ds_name, list_datastore_full(service_instance, ds_name))
        for ds_name in list_objects(service_instance, vim.Datastore))
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError when the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Sizes are converted from bytes to MiB.
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against a zero-capacity datastore (e.g. inaccessible or
    # unmounted) which would otherwise raise ZeroDivisionError.
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies as e.g. 'vim.HostSystem:host-123'; keep only
        # the moid part after the colon.
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get a reference to the first object of the specified type and name, or
    None when no match exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    # Recursive container view over the whole inventory tree.
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((entry for entry in container.view if entry.name == obj_name),
                None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object

    Returns the managed object reference, or None if no object matches.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    # Container views are server-side objects; destroy the view when done so
    # repeated lookups don't accumulate views on the vCenter/ESXi side
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
    finally:
        container.Destroy()
    return None
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults.
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Datastore paths take the form '[<datastore name>] <path>'
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore that doesn't contain the searched directory is
            # simply skipped
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible. Supported
        types are vim.HostSystem, vim.ClusterComputeResource, vim.Datacenter,
        vim.StoragePod and the root 'Datacenters' vim.Folder.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.
        NOTE(review): when backing_disk_ids is also set, this list is mutated
        in place (extended with the datastores found on those disks).

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # NOTE(review): mutates the caller-supplied list
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    # Each supported reference type needs its own traversal spec to reach the
    # 'datastore' (or 'childEntity') property of the container
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system

    service_instance
        The Service Instance Object used to query the host.

    host_ref
        The vim.HostSystem object whose storage system is retrieved.

    hostname
        Name of the host; looked up from host_ref if not provided.

    Raises VMwareObjectRetrievalError when the storage system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem of the target host.

    device_path
        Path of the disk device to be partitioned.

    partition_info
        The current vim.HostDiskPartitionInfo of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use logging's lazy %-style placeholder; the previous '{0}' str.format
    # placeholder was never substituted by the logging module
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDisk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem; retrieved if not provided.

    Returns the vim.Datastore reference of the newly created datastore.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a partition spec that consumes the remaining free space on
    # the disk (see _get_new_computed_partition_spec)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises VMwareObjectRetrievalError when the datastore system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The datastore must have at least one attached host;
    the removal is performed through the datastore system of the first
    attached host.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises VMwareApiError when no host is attached or on permission/API
    faults, and VMwareRuntimeError on runtime faults.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host can perform the removal; use the first one
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # The 'parent' property is needed to verify cluster membership;
            # cluster filtering only makes sense within a datacenter
            properties.append('parent')
    else:
        # No datacenter given: search from the inventory root folder
        start_point = get_root_folder(service_instance)
    # Retrieve candidate hosts together with the requested properties
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for entry in hosts:
        # Enforce cluster membership first, then the name/get-all filter
        if cluster_name:
            parent = entry['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or entry['name'] in host_names:
            filtered_hosts.append(entry['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises VMwareObjectRetrievalError when the storage device, multipath or
    lun information cannot be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    Returns an empty list when the host reports no scsi luns.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = storage_system or get_storage_system(si, host_ref,
                                                          hostname)
    # Map scsi address -> lun key, then resolve each key to its lun object
    scsi_addr_to_lun_key = _get_scsi_address_to_lun_key_map(
        si, host_ref, storage_system, hostname)
    luns_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        luns_by_key[lun.key] = lun
    return {scsi_addr: luns_by_key[lun_key]
            for scsi_addr, lun_key in six.iteritems(scsi_addr_to_lun_key)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # Nothing to filter on, so there is nothing to return
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError when no devices, or no disk matching
    disk_id, can be found on the host.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Only scsi disks can carry partitions; match by canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError when the host's devices, or a disk
    matching disk_id, cannot be found; VMwareApiError on permission/API
    faults; VMwareRuntimeError on runtime faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the scsi disk with the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.

    Raises VMwareObjectRetrievalError when the host has no vsan host config
    or no vsan storage info.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # Nothing to filter on, so there is nothing to return
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by the canonical name of its (single) ssd
    # cache disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that a disk group is backed by the expected cache disk and the
    expected set of capacity disks; raises an ArgumentValueError describing
    the mismatch otherwise. Returns True on success.
    '''
    # The (single) ssd disk of the group must be the expected cache disk
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    # Compare the capacity (non-ssd) disks as order-insensitive sets
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, otherwise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Traverse from the host to its cache configuration manager
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first cache configuration is considered (single datastore)
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore opject representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method

    Returns True on success; raises VMwareApiError/VMwareRuntimeError on API
    or runtime faults.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # The configuration runs as a vCenter task; block until it completes
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Return all host systems known to the given service instance.

    service_instance
        The Service Instance Object used for the lookup.
    '''
    obj_type = vim.HostSystem
    return list_objects(service_instance, obj_type)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        List of resource pool names to select; may be empty or None when
        ``get_all_resource_pools`` is True

    datacenter_name
        Name of the datacenter where the resource pool is available.
        Default is None, meaning the search starts at the root folder.

    get_all_resource_pools
        Boolean; when True every resource pool under the container is
        returned. Default is False.

    return
        List of vim.ResourcePool managed object references

    Raises salt.exceptions.VMwareObjectRetrievalError when no resource pool
    matched the request.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = [pool['object'] for pool in resource_pools
                      if get_all_resource_pools or pool['name'] in resource_pool_names]
    if not selected_pools:
        # Report the names that were requested; the previous message formatted
        # selected_pools, which is always empty here.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(resource_pool_names,
                                                            get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Return all resource pools known to the given service instance.

    service_instance
        The Service Instance Object used for the lookup.
    '''
    obj_type = vim.ResourcePool
    return list_objects(service_instance, obj_type)
def list_networks(service_instance):
    '''
    Return all networks known to the given service instance.

    service_instance
        The Service Instance Object used for the lookup.
    '''
    obj_type = vim.Network
    return list_objects(service_instance, obj_type)
def list_vms(service_instance):
    '''
    Return all virtual machines known to the given service instance.

    service_instance
        The Service Instance Object used for the lookup.
    '''
    obj_type = vim.VirtualMachine
    return list_objects(service_instance, obj_type)
def list_folders(service_instance):
    '''
    Return all folders known to the given service instance.

    service_instance
        The Service Instance Object used for the lookup.
    '''
    obj_type = vim.Folder
    return list_objects(service_instance, obj_type)
def list_dvs(service_instance):
    '''
    Return all distributed virtual switches known to the given service
    instance.

    service_instance
        The Service Instance Object used for the lookup.
    '''
    obj_type = vim.DistributedVirtualSwitch
    return list_objects(service_instance, obj_type)
def list_vapps(service_instance):
    '''
    Return all vApps known to the given service instance.

    service_instance
        The Service Instance Object used for the lookup.
    '''
    obj_type = vim.VirtualApp
    return list_objects(service_instance, obj_type)
def list_portgroups(service_instance):
    '''
    Return all distributed virtual portgroups known to the given service
    instance.

    service_instance
        The Service Instance Object used for the lookup.
    '''
    obj_type = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, obj_type)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed and returns the task result, re-raising
    any task error as the matching salt VMware exception.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.
        NOTE(review): the loop below actually polls roughly once per second
        regardless; this value only controls how often the "Waiting" message
        is logged — confirm whether that is intended.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Accessing task.info itself can fault; translate those faults too
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Log only every sleep_seconds iterations
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary relative to start_time
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored error so the except
        # clauses below can translate it into a salt exception type
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first detailed fault message when one is provided
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name; used to restrict the search when ``parent_ref`` is
        not supplied. Default is None.

    vm_properties
        List of vm properties; defaults to a standard hardware/storage/guest
        property set.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises salt.exceptions.VMwareObjectRetrievalError when no VM matches and
    salt.exceptions.VMwareMultipleObjectsError when more than one matches.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Previously the two implicitly-concatenated fragments produced the
        # message "...with thesame name..."; the missing space is restored.
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the '
            'same name, please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary; an optional 'folder' key selects an explicit
        folder by name

    base_vm_name
        Existing virtual machine name (for cloning); when given, the parent
        folder of that machine is returned

    Raises salt.exceptions.VMwareObjectRetrievalError when the folder cannot
    be resolved and salt.exceptions.ArgumentValueError when no folder source
    (base VM, placement folder or datacenter) was supplied.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this case fell through and raised UnboundLocalError on
        # the return below; fail with an explicit, catchable error instead.
        raise salt.exceptions.ArgumentValueError(
            'Unable to retrieve folder: no base VM, folder placement or '
            'datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object to access the vCenter

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info: 'host', 'resourcepool' or
        'cluster' name. Default is None.

    return
        Tuple of (resource pool object, cluster/host placement object)

    Raises salt.exceptions.VMwareObjectRetrievalError when the requested
    object cannot be found or no placement was given, and
    salt.exceptions.VMwareMultipleObjectsError on ambiguous matches.
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if placement is None:
        # Guard the 'in' membership tests below against None; an empty dict
        # falls through to the explicit "Placement is not defined" error.
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose resourcePool directly; hosts inside a
            # cluster need a traversal through the parent compute resource.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Previously this message read placement['host'], which is not
            # guaranteed to exist in this branch and raised KeyError.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a dict with the
    size (an integer) and the unit 'KB'.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    # Multipliers to KB; vCenter expects an integer (long) value
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    normalized_unit = unit.lower()
    if normalized_unit not in multipliers:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': int(size * multipliers[normalized_unit]), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine; either 'on' or 'off'

    return
        The virtual machine object, once the power task has completed

    Raises salt.exceptions.ArgumentValueError on an unsupported action,
    VMwareApiError/VMwareRuntimeError on API faults and VMwarePowerOnError
    when a required file is missing during the power operation.
    '''
    # Select the operation first so the (previously duplicated) exception
    # translation below exists only once.
    if action == 'on':
        power_op = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_op = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_op()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from the given config spec and returns the
    managed object reference of the new machine.

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    create_kwargs = {'pool': resourcepool_object}
    # Only pass a host when a real vim.HostSystem was supplied
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)
    '''
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Reconfigures a virtual machine with the given config spec and returns
    the reconfiguration task result.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to apply
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine, deleting it from the inventory and disk.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory; unlike delete_vm this
    does not delete the machine's files from disk.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Log message corrected: this function unregisters, it does not destroy
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # log.exception added for consistency with every sibling function
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
_get_new_computed_partition_spec
|
python
|
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The host storage system used to compute the partition info.

    device_path
        Path of the disk device to partition.

    partition_info
        The current vim.HostDiskPartitionInfo of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # %s placeholder: the logging module formats lazy arguments printf-style,
    # so the previous '{0}' placeholder was never substituted.
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
|
Computes the new disk partition info when adding a new vmfs partition that
uses up the remainder of the disk; returns a tuple
(new_partition_number, vim.HostDiskPartitionSpec)
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2290-L2350
| null |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port (default 443)
    :param protocol: Connection protocol (default 'https')
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: Dictionary with the command result (stdout/stderr/retcode), or
             False when the esxcli binary is not available
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    # NOTE(review): the password is interpolated into the shell command line;
    # output_loglevel='quiet' below keeps it out of command logging, but it is
    # still visible in the process list while the command runs — confirm this
    # is acceptable for the deployment.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username / password
        Credentials; both are mandatory when mechanism is 'userpass'.

    protocol / port
        Connection protocol and TCP port.

    mechanism
        Login mechanism, either 'userpass' or 'sspi'.

    principal / domain
        Kerberos service principal and user domain; both mandatory when
        mechanism is 'sspi'.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # six.text_type(exc) instead of exc.message: the 'message' attribute
        # was removed from BaseException in Python 3, so exc.message would
        # raise AttributeError here and mask the original error.
        exc_str = six.text_type(exc)
        if 'unexpected keyword argument' in exc_str:
            log.error('Initial connect to the VMware endpoint failed with %s', exc_str)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # Retry without certificate verification
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: explicit TLSv1 context with verification off
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Disconnect cleanly when the process exits
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of customizing a clone

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Keep the parameter and the looked-up spec in separate names for clarity
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    return
        The matching managed object reference, or None when no object of the
        given type has the requested name.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Destroy the server-side view; previously it was leaked on every call
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    Reuses the process-wide cached connection when it still points at the same
    host and is still authenticated; otherwise establishes a fresh one.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # GetSi returns the connection cached by pyVim, if any
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Builds a new SOAP stub pointing at a different endpoint path, reusing the
    session cookie of an already established connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    ssl_context = None
    if sys.version_info[:3] > (2, 7, 8):
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE

    current_stub = service_instance._stub
    hostname = current_stub.host.split(':')[0]
    session_cookie = current_stub.cookie.split('"')[1]
    # Propagate the existing vCenter session so the new stub is authenticated.
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=ssl_context)
    new_stub.cookie = current_stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.
    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).
    name
        Name of managed object, used only for log messages. This field is
        optional. NOTE(review): the fallback to ``mo_ref.name`` below only
        triggers when an explicitly falsy name is passed; the default
        '<unnamed>' is truthy, so it never does.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a fresh ServiceInstance that reuses the managed object's SOAP
    # stub (and thus its authenticated session).
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Closes the connection to the vCenter server or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as err:
        log.exception(err)
        msg = ('Not enough permissions. Required privilege: '
               '{}'.format(err.privilegeId))
        raise salt.exceptions.VMwareApiError(msg)
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Returns True when the connection targets a vCenter Server and False when
    it targets an ESXi host, based on the reported API type.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' -> vCenter; 'HostAgent' -> standalone ESXi host
    known_types = {'VirtualCenter': True, 'HostAgent': False}
    if api_type in known_types:
        return known_types[api_type]
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns the AboutInfo of the connected vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as err:
        log.exception(err)
        msg = ('Not enough permissions. Required privilege: '
               '{0}'.format(err.privilegeId))
        raise salt.exceptions.VMwareApiError(msg)
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no DVS matches
    '''
    # Bail out early when the name isn't among the known switches.
    if dvs_name not in list_dvs(service_instance):
        return None
    content = get_inventory(service_instance)
    view_ref = content.viewManager.CreateContainerView(
        content.rootFolder, [vim.DistributedVirtualSwitch], True)
    return next((entry for entry in view_ref.view if entry.name == dvs_name),
                None)
def _get_pnics(host_reference):
    '''
    Helper function that returns the host's PhysicalNic objects.
    '''
    network_config = host_reference.config.network
    return network_config.pnic
def _get_vnics(host_reference):
    '''
    Helper function that returns the host's VirtualNic objects.
    '''
    network_config = host_reference.config.network
    return network_config.vnic
def _get_vnic_manager(host_reference):
    '''
    Helper function that returns the host's virtual NIC manager
    (configManager.virtualNicManager). Note: despite the old wording, a
    single manager object is returned, not a list.
    '''
    return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
    '''
    Return the portgroup on the given DVS matching the supplied name.

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None when no portgroup matches
    '''
    return next((pg for pg in dvs.portgroup if pg.name == portgroup_name),
                None)
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return the portgroup on the given DVS matching the supplied name.

    NOTE(review): this is currently identical to _get_dvs_portgroup and does
    not restrict the search to uplink portgroups — confirm intent.

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None when no portgroup matches
    '''
    return next((pg for pg in dvs.portgroup if pg.name == portgroup_name),
                None)
def get_gssapi_token(principal, host, domain):
    '''
    Obtain a base64-encoded GSSAPI token for a Kerberos connection.

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    sec_context = gssapi.InitContext(service_name)
    token_in = None
    while not sec_context.established:
        token_out = sec_context.step(token_in)
        if token_out:
            if six.PY2:
                return base64.b64encode(token_out)
            return base64.b64encode(
                salt.utils.stringutils.to_bytes(token_out))
        if sec_context.established:
            break
        if not token_in:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type
    service_instance
        The service instance object to get hardware info for
    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only standalone ESXi hosts (apiType 'HostAgent') expose a single host's
    # hardware; skip entirely for vCenter connections.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # All data below is read from the first (only) HostSystem found.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grain is reported in MiB
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            # Per-device network grains, mirroring the standard minion grains
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn = host ('.' domain)? — the dot is omitted for empty domains
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the container-view reference once harvesting is done.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (ServiceContent) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the inventory root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    log.trace('Retrieving root folder')
    try:
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as err:
        log.exception(err)
        msg = ('Not enough permissions. Required privilege: '
               '{}'.format(err.privilegeId))
        raise salt.exceptions.VMwareApiError(msg)
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.
    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html
    service_instance
        The Service Instance from which to obtain content.
    obj_type
        The type of content to obtain.
    property_list
        An optional list of object properties to used to return even more filtered content results.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.
    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view created above; views hold server-side state,
    # so only destroy one this function created itself.
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference whose named property matches
    the given value (or whose moid string matches the value).

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    candidates = get_mors_with_properties(service_instance, object_type, property_list=[property_name], container_ref=container_ref)
    for candidate in candidates:
        # The stringified MOR looks like 'vim.X:moid'; strip surrounding quotes
        moid = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == moid:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.
    service_instance
        The Service Instance from which to obtain managed object references.
    object_type
        The type of content for which to obtain managed object references.
    property_list
        An optional list of object properties used to return even more filtered managed object reference results.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec
    local_properties
        Flag specigying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # Server dropped the HTTP connection mid-response; retry exactly once.
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        # Broken pipe (EPIPE): the socket went away; retry exactly once.
        content = get_content(*content_args, **content_kwargs)

    # Flatten each result's propSet into a dict, keeping the MOR itself
    # under the 'object' key.
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns the requested properties of a managed object as a dict.
    Raises VMwareApiError when nothing could be retrieved.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    mo_type = type(mo_ref)
    # First fetch the object's name, purely for nicer log/error messages.
    log.trace('Retrieving name of %s', mo_type.__name__)
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                mo_type,
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, mo_type.__name__, mo_name)
    entries = get_mors_with_properties(service_instance,
                                       mo_type,
                                       container_ref=mo_ref,
                                       property_list=properties,
                                       local_properties=True)
    if not entries:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return entries[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object, or None when no name was found.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Instantiate and return the virtual network adapter device matching the
    given type name.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e'.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the type name of a virtual network adapter device.

    adapter_object
        The adapter object from which to obtain the network adapter type.
    '''
    # Order matters: vmxnet2/vmxnet3 appear to be more specific than vmxnet,
    # so they must be matched first — preserve this order.
    type_table = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for adapter_class, type_name in type_table:
        if isinstance(adapter_object, adapter_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    entries = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualSwitch,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    selected = []
    for entry in entries:
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            selected.append(entry['object'])
    return selected
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.
    Raises VMwareObjectRetrievalError when the folder cannot be found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Follow the datacenter's 'networkFolder' property directly.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to finish.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the
        name is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    spec = dvs_create_spec if dvs_create_spec else vim.DVSCreateSpec()
    if not spec.configSpec:
        # Caller supplied no config: build one that only sets the name.
        spec.configSpec = vim.VMwareDVSConfigSpec()
        spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Reconfigures a distributed virtual switch and waits for the task to
    complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enables or disables network I/O control (NIOC) on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on dvs \'%s\'',
              enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Datacenter parent: descend through the network folder's children.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # DVS parent: portgroups hang directly off the 'portgroup' property.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    entries = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualPortgroup,
                                       container_ref=parent_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    return [entry['object'] for entry in entries
            if get_all_portgroups or
            (portgroup_names and entry['name'] in portgroup_names)]
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    entries = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualPortgroup,
                                       container_ref=dvs_ref,
                                       property_list=['tag'],
                                       traversal_spec=traversal_spec)
    # The uplink portgroup carries the 'SYSTEM/DVS.UPLINKPG' system tag.
    uplink_pgs = []
    for entry in entries:
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log message typo ('portgrouo' -> 'portgroup')
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Destroys a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns standard-switch networks found under a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to reach networks
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    entries = get_mors_with_properties(service_instance,
                                       vim.Network,
                                       container_ref=parent_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    return [entry['object'] for entry in entries
            if get_all_networks or
            (network_names and entry['name'] in network_names)]
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns the names of all objects of a given type in a service instance.

    service_instance
        The Service Instance from which to obtain the objects.

    vim_object
        The vim type of object to list (e.g. vim.Datastore).

    properties
        An optional list of object properties to retrieve.
        Defaults to ``['name']``.
    '''
    if properties is None:
        properties = ['name']
    return [entry['name'] for entry in
            get_mors_with_properties(service_instance, vim_object,
                                     properties)]
def get_license_manager(service_instance):
    '''
    Retrieves the license manager of a vCenter/ESXi instance.

    service_instance
        The Service Instance Object from which to obtain the license
        manager.
    '''
    log.debug('Retrieving license manager')
    try:
        manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return manager
def get_license_assignment_manager(service_instance):
    '''
    Retrieves the license assignment manager of a vCenter/ESXi instance.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # The attribute may legitimately be unset on some endpoints
    if not assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns all licenses installed on a vCenter/ESXi instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. Retrieved if
        not provided.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license to a vCenter/ESXi instance.

    service_instance
        The Service Instance Object.

    key
        The license key to add.

    description
        A human-readable description, stored as the license label.

    license_manager
        The License Manager object of the service instance. Retrieved if
        not provided.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label shown by the vSphere client
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        new_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return new_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging; mandatory.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ArgumentValueError if entity_name is not passed, and
    VMwareObjectRetrievalError if the assignment lookup is ambiguous or
    belongs to a different vCenter.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # vCenter case: identify the entity by its instance uuid and
        # remember to cross-check the returned display name later
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        # Concrete entity (e.g. host, vsan cluster): use its moid
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid) query is expected to yield exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        # Guard against querying a different vCenter than the caller named
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter - identified by its instance uuid rather than a moid
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before re-raising, consistent with the other API wrappers
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Lists the names of all datacenters in a given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter, optionally filtered by name.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns the vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The name of the datacenter to retrieve.

    Raises VMwareObjectRetrievalError if no datacenter with that name
    exists.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The name of the datacenter to create.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        datacenter = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return datacenter
def get_cluster(dc_ref, cluster):
    '''
    Returns the vim.ClusterComputeResource with the given name from a
    datacenter.

    dc_ref
        The datacenter reference.

    cluster
        The name of the cluster to retrieve.

    Raises VMwareObjectRetrievalError if the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    service_instance = get_service_instance_from_managed_object(
        dc_ref, name=dc_name)
    # Traverse datacenter -> hostFolder -> childEntity to reach clusters
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    clusters = [entry['object'] for entry in
                get_mors_with_properties(service_instance,
                                         vim.ClusterComputeResource,
                                         container_ref=dc_ref,
                                         property_list=['name'],
                                         traversal_spec=traversal_spec)
                if entry['name'] == cluster]
    if not clusters:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return clusters[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter's host folder.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The name of the cluster to create.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Reconfigures an existing cluster and waits for the task to finish.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx) to apply.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing configuration
        reconfig_task = cluster_ref.ReconfigureComputeResource_Task(
            cluster_spec, modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(reconfig_task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Lists the names of all clusters in a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Lists the names of all datastore clusters (storage pods) in a given
    service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Lists the names of all datastores in a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns basic information about every datastore in a service instance,
    keyed by datastore name. Each entry contains:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {name: list_datastore_full(service_instance, name)
            for name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Summary sizes are in bytes; report MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against zero-capacity (e.g. inaccessible) datastores, which
    # would otherwise raise ZeroDivisionError
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies as "'vim.HostSystem:host-123'"; extract the
        # moid after the colon to resolve the host object
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Returns the managed object reference of the given type and name, or
    None if no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((obj for obj in container.view if obj.name == obj_name),
                None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Returns the managed object reference of the given type and moid, or
    None if no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((obj for obj in container.view if obj._moId == obj_moid),
                None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Start an async search task rooted at "[<datastore>] <directory>"
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore without the directory is simply skipped,
            # not treated as an error
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.
        NOTE(review): when backing_disk_ids is also given, this list is
        extended in place (mutates the caller's list).

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        # Translate backing disk ids into the names of the datastores whose
        # VMFS extents live on those disks, and merge them into the name
        # filter.
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        # No name and no disk filter matched anything - nothing to return
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames an existing datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        The new name for the datastore.
    '''
    current_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'",
              current_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns the storage system of an ESXi host.

    service_instance
        The Service Instance Object.

    host_ref
        The vim.HostSystem whose storage system is retrieved.

    hostname
        The name of the host, used for logging/errors. Retrieved if not
        provided.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Follow the host's configManager.storageSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return results[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns the vim.HostDiskPartitionInfo of a single device path.

    storage_system
        The host storage system used for the query.

    device_path
        The device path whose partition information is retrieved.
    '''
    try:
        infos = storage_system.RetrieveDiskPartitionInfo(
            devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', infos[0])
    return infos[0]
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's storage system; retrieved if not provided.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    # Inspect the current partition layout of the target disk
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a partition layout covering the disk for the new datastore
    # (presumably the whole free space - see the TODO above)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns the datastore system of an ESXi host.

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Follow the host's configManager.datastoreSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return results[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore via the datastore system of one of its attached
    hosts.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Use the datastore system of the first attached host for the removal
    host_ref = ds_hosts[0].key
    hostname = get_managed_object_name(host_ref)
    host_ds_system = get_host_datastore_system(host_ref, hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)

        if cluster_name:
            # Drop hosts that aren't in a cluster, or are in the wrong one
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue

        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue

        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises VMwareObjectRetrievalError when the storage device, multipath or
    lun information cannot be obtained.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk.
    Returns an empty list if the host reports no scsi luns.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None; retrieved from the host
        if not provided.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        # Need a service instance to look up the storage system
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    # Translate vSphere API faults into salt-specific exceptions
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Builds a map of all vim.ScsiLun objects on an ESXi host, keyed by their
    scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # scsi address -> lun key
    addr_to_lun_key = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                       storage_system,
                                                       hostname)
    # lun key -> vim.ScsiLun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Join the two maps: scsi address -> vim.ScsiLun object
    scsi_addr_map = {}
    for scsi_addr, lun_key in six.iteritems(addr_to_lun_key):
        scsi_addr_map[scsi_addr] = lun_by_key[lun_key]
    return scsi_addr_map
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # No filters were given, so nothing can match
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    # A disk matches if it is a HostScsiDisk and either all disks were
    # requested, or it matches one of the requested ids or scsi addresses
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk.

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None; retrieved from the
        host if not provided.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Narrow the device list down to the requested scsi disk
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk of an ESXi host by applying an empty
    partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # Traverse from the host to its storage system to read the scsi luns
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Narrow the device list down to the requested scsi disk
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their canonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # No cache disk ids given, so nothing can match
        if not cache_disk_ids:
            return []
    # Translate vSphere API faults into salt-specific exceptions
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by the canonical name of its (single) ssd
    # cache disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Validates that a disk group contains exactly the expected cache disk and
    capacity disks; raises ArgumentValueError exceptions when the check
    fails. Returns True on success.
    '''
    # The cache disk must match the expected canonical name
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    # The capacity (non-ssd) disks must match the expected set exactly;
    # compare sorted lists so order doesn't matter
    actual_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_ids = sorted(capacity_disk_ids)
    if actual_ids != expected_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_ids, expected_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, otherwise returns None.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Traverse from the host to its cache configuration manager
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first cache configuration is supported (single datastore)
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host. Returns True on success.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache
        will be configured on.

    swap_size_MiB
        The size in mebibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    # Translate vSphere API faults into salt-specific exceptions
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Return all vim.HostSystem objects known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects.

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; if True, all resource pools in the container are returned

    return
        List of resource pool managed object references

    Raises VMwareObjectRetrievalError if no matching resource pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search under the datacenter if given, otherwise under the root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the names that were requested; selected_pools is always
        # empty here and would make the message useless
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))

    return selected_pools
def list_resourcepools(service_instance):
    '''
    Return all vim.ResourcePool objects known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Return all vim.Network objects known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Return all vim.VirtualMachine objects known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Return all vim.Folder objects known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Return all vim.DistributedVirtualSwitch objects known to the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        switches.
    '''
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Return all vim.VirtualApp objects known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Return all vim.dvs.DistributedVirtualPortgroup objects known to the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        portgroups.
    '''
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed. On success, returns the task result;
    on failure, re-raises the task's error translated into a salt exception.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll the task until it leaves the 'running'/'queued' states
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only log every sleep_seconds iterations to limit log volume
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep to the next whole-second boundary relative to start_time
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; raise the stored fault so it can be
        # translated into the appropriate salt exception below
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError when no VM matches and
    VMwareMultipleObjectsError when more than one VM matches.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set used by callers that don't specify one
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # NOTE: a comma was missing between the two string literals, which
        # produced the message 'with thesame name'
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object.

    The folder is resolved in priority order from: the parent folder of an
    existing base VM, an explicitly named folder in the placement dict, or
    the datacenter's default vm folder.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif placement and 'folder' in placement:
        # 'placement and' guards against placement=None raising a TypeError
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this fell through and raised a NameError on the
        # return statement; fail explicitly instead
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The folder could not be determined: no base VM, placement '
            'folder or datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if placement is None:
        # Fall through to the explicit 'Placement is not defined' error
        # instead of raising a TypeError on the membership tests below
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose the resource pool on the parent
            # compute resource instead
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # NOTE: this message previously referenced placement['host'],
            # which may not exist in this branch and raised a KeyError
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit; returns a dict with the
    converted integer size and the unit ('KB').

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    normalized = unit.lower()
    if normalized == 'gb':
        # vCenter needs long value
        converted = int(size * 1024 * 1024)
    elif normalized == 'mb':
        converted = int(size * 1024)
    elif normalized == 'kb':
        converted = int(size)
    else:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': converted, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine and waits for the power operation task
    to complete; returns the virtual machine object.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine; either 'on' or 'off'.
        Raises ArgumentValueError for any other value.
    '''
    if action == 'on':
        try:
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    elif action == 'off':
        try:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # A missing file during a power operation is surfaced as a
        # power-specific error
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec.

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    try:
        # Only pass the host argument when a valid host system was given
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference.

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)
    '''
    try:
        # Only pass the host argument when a placement host was given
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # A missing vmx file is surfaced as a registration-specific error
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object and waits
    for the reconfiguration task to complete.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the given virtual machine.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        msg = 'Not enough permissions. Required privilege: ' \
              '{}'.format(exc.privilegeId)
        raise salt.exceptions.VMwareApiError(msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the destroy operation finishes (raises on task failure).
    wait_for_task(destroy_task, name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters a virtual machine from the inventory.

    Unlike ``delete_vm`` this removes the VM from the vCenter/ESXi
    inventory only; per the vSphere API, ``UnregisterVM`` leaves the
    VM's files on the datastore.

    vm_ref
        Managed object reference of a virtual machine object

    :raises salt.exceptions.VMwareApiError: on permission or API faults
    :raises salt.exceptions.VMwareRuntimeError: on vmodl runtime faults
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed copy-paste from delete_vm: docstring and message said 'Destroying'
    # even though this function only unregisters the VM.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before re-raising, consistent with the sibling VM helpers.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
create_vmfs_datastore
|
python
|
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        Optional vim.HostStorageSystem of the host; looked up from
        ``host_ref`` when not provided.

    Returns the created datastore reference.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        # Reuse the host's own connection to fetch its storage system.
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    # Current partition layout of the target disk.
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a new vmfs partition occupying the free space at the end of the
    # disk; yields the new partition number and the full partition spec.
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
|
Creates a VMFS datastore from a disk_id
host_ref
vim.HostSystem object referencing a host to create the datastore on
datastore_name
Name of the datastore
disk_ref
vim.HostScsiDislk on which the datastore is created
vmfs_major_version
VMFS major version to use
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2353-L2412
|
[
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n",
"def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):\n '''\n Retrieves the service instance from a managed object.\n\n me_ref\n Reference to a managed object (of type vim.ManagedEntity).\n\n name\n Name of managed object. This field is optional.\n '''\n if not name:\n name = mo_ref.name\n log.trace('[%s] Retrieving service instance from managed object', name)\n si = vim.ServiceInstance('ServiceInstance')\n si._stub = mo_ref._stub\n return si\n",
"def get_storage_system(service_instance, host_ref, hostname=None):\n '''\n Returns a host's storage system\n '''\n\n if not hostname:\n hostname = get_managed_object_name(host_ref)\n\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n path='configManager.storageSystem',\n type=vim.HostSystem,\n skip=False)\n objs = get_mors_with_properties(service_instance,\n vim.HostStorageSystem,\n property_list=['systemFile'],\n container_ref=host_ref,\n traversal_spec=traversal_spec)\n if not objs:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Host\\'s \\'{0}\\' storage system was not retrieved'\n ''.format(hostname))\n log.trace('[%s] Retrieved storage system', hostname)\n return objs[0]['object']\n",
"def _get_partition_info(storage_system, device_path):\n '''\n Returns partition informations for a device path, of type\n vim.HostDiskPartitionInfo\n '''\n try:\n partition_infos = \\\n storage_system.RetrieveDiskPartitionInfo(\n devicePath=[device_path])\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{0}'.format(exc.privilegeId))\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n log.trace('partition_info = %s', partition_infos[0])\n return partition_infos[0]\n",
"def _get_new_computed_partition_spec(storage_system,\n device_path,\n partition_info):\n '''\n Computes the new disk partition info when adding a new vmfs partition that\n uses up the remainder of the disk; returns a tuple\n (new_partition_number, vim.HostDiskPartitionSpec\n '''\n log.trace('Adding a partition at the end of the disk and getting the new '\n 'computed partition spec')\n # TODO implement support for multiple partitions\n # We support adding a partition add the end of the disk with partitions\n free_partitions = [p for p in partition_info.layout.partition\n if p.type == 'none']\n if not free_partitions:\n raise salt.exceptions.VMwareObjectNotFoundError(\n 'Free partition was not found on device \\'{0}\\''\n ''.format(partition_info.deviceName))\n free_partition = free_partitions[0]\n\n # Create a layout object that copies the existing one\n layout = vim.HostDiskPartitionLayout(\n total=partition_info.layout.total,\n partition=partition_info.layout.partition)\n # Create a partition with the free space on the disk\n # Change the free partition type to vmfs\n free_partition.type = 'vmfs'\n try:\n computed_partition_info = storage_system.ComputeDiskPartitionInfo(\n devicePath=device_path,\n partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,\n layout=layout)\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{0}'.format(exc.privilegeId))\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n log.trace('computed partition info = {0}', computed_partition_info)\n log.trace('Retrieving new partition number')\n partition_numbers = [p.partition for p in\n computed_partition_info.layout.partition\n if (p.start.block == free_partition.start.block or\n # XXX If the entire disk is free (i.e. 
the free\n # disk partition starts at block 0) the newily\n # created partition is created from block 1\n (free_partition.start.block == 0 and\n p.start.block == 1)) and\n p.end.block == free_partition.end.block and\n p.type == 'vmfs']\n if not partition_numbers:\n raise salt.exceptions.VMwareNotFoundError(\n 'New partition was not found in computed partitions of device '\n '\\'{0}\\''.format(partition_info.deviceName))\n log.trace('new partition number = %s', partition_numbers[0])\n return (partition_numbers[0], computed_partition_info.spec)\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli commmand, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param protocol: Connection protocol; defaults to 'https'
    :param port: TCP port; defaults to 443
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary produced by ``cmdmod.run_all`` (retcode/stdout/stderr),
             or False when the esxcli binary is not found on PATH
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    # NOTE(review): the password is interpolated into a shell command line; a
    # password containing a single quote would break the quoting, and the
    # credential is visible in the process list. Prefer a credstore.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connect to vCenter (-s) and address the target ESXi host with -h.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    # output_loglevel='quiet' — presumably to keep the credential-bearing
    # command line out of Salt's logs; confirm before changing.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    mechanism is either 'userpass' (requires username and password) or
    'sspi' (requires principal and domain, from which a Kerberos/GSSAPI
    token is obtained). Any other value raises CommandExecutionError.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        # First attempt: default SSL context (certificate verification on).
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Old pyVmomi versions do not accept the b64token/mechanism kwargs.
        # NOTE(review): 'exc.message' only exists on Python 2; on Python 3
        # this handler would itself raise AttributeError — confirm intent.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # Retry once with certificate verification disabled when the
            # failure was an SSL certificate verification error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: explicit TLSv1 context with verification off.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the vSphere session is closed when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Look the spec up by name via the customization spec manager.
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the matching managed object reference, or None when no object
    of that type carries the requested name.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                # The returned reference stays valid after the view is gone.
                return item
        return None
    finally:
        # Destroy the server-side view; the original leaked one view object
        # per call for the lifetime of the session.
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``

    Returns a (possibly cached) authenticated vim.ServiceInstance.
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # pyVim caches the last connection process-wide; try to reuse it.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        # Cheap round trip that fails with NotAuthenticated on a dead session.
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.

    NOTE: relies on pyVmomi internals — the private ``_stub`` attribute and
    the double-quoted cookie format parsed below.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    # Extract the session id from the cookie (value between double quotes)
    # and propagate it so the new stub shares the authenticated session.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Builds a service instance that shares the connection (SOAP stub) of a
    managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    service_instance = vim.ServiceInstance('ServiceInstance')
    # Reuse the managed object's authenticated SOAP stub.
    service_instance._stub = mo_ref._stub
    return service_instance
def disconnect(service_instance):
    '''
    Closes the connection to the vCenter server or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        msg = 'Not enough permissions. Required privilege: ' \
              '{}'.format(exc.privilegeId)
        raise salt.exceptions.VMwareApiError(msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Tells whether the connection endpoint is a vCenter Server (True) or an
    ESXi host (False); any other apiType raises VMwareApiError.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        msg = 'Not enough permissions. Required privilege: ' \
              '{}'.format(exc.privilegeId)
        raise salt.exceptions.VMwareApiError(msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # Map the two supported endpoint types to booleans.
    known_api_types = {'VirtualCenter': True, 'HostAgent': False}
    if api_type in known_api_types:
        return known_api_types[api_type]
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns the 'about' information of the vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        msg = 'Not enough permissions. Required privilege: ' \
              '{0}'.format(exc.privilegeId)
        raise salt.exceptions.VMwareApiError(msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no switch carries that name
    '''
    # Cheap pre-check against the list of known switch names.
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    for switch in container.view:
        if switch.name == dvs_name:
            return switch
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Returns the first token produced by the GSSAPI handshake,
    base64-encoded (bytes). Raises CommandExecutionError when the
    handshake yields no token.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    # Service name of the form principal/host@DOMAIN
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # A single step is expected to produce the outgoing token; the
        # function returns on the first token rather than completing the
        # full handshake loop.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): in_token is never reassigned inside this loop, so a
        # step that yields no token always raises here instead of iterating.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    Returns an empty dict when the endpoint is not a HostAgent (ESXi host).

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Hardware grains are only meaningful when connected directly to a host.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # All values below are read from the first HostSystem in the view.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grain is reported in MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # One entry per virtual NIC (vmk interfaces).
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host + '.' + domain, with the dot omitted when no domain.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NIC MACs are merged into the same hwaddr mapping.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the local reference to the container view.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    # RetrieveContent() exposes the full content tree of the connection.
    return service_instance.RetrieveContent()
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    try:
        log.trace('Retrieving root folder')
        content = service_instance.RetrieveContent()
        return content.rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        msg = 'Not enough permissions. Required privilege: ' \
              '{}'.format(exc.privilegeId)
        raise salt.exceptions.VMwareApiError(msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.

    Raises:
        salt.exceptions.VMwareApiError
            If any vim API call is denied (missing privilege) or fails.
        salt.exceptions.VMwareRuntimeError
            On a vmodl runtime fault.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view; only needed when we created it above
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose ``property_name``
    equals ``property_value``; a match on the stringified moid is also
    accepted. Returns None when nothing matches.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match against.

    property_name
        The object property compared against ``property_value``.
        Defaults to ``name``.

    container_ref
        Optional managed object to search under (Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem). Defaults to the
        inventory rootFolder.
    '''
    # Only fetch the single property we need for each candidate object.
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # The stringified MOR looks like 'vim.Type:moid'; strip the quotes so
        # callers may also pass the object id string directly.
        moid = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == moid:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Return a list of dicts, one per managed object of ``object_type``:
    each dict maps the requested property names to their values and adds
    an ``object`` key holding the managed object reference itself.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more
        filtered managed object reference results.

    container_ref
        Optional managed object to search under (Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem). Defaults to the
        inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''
    query_args = [service_instance, object_type]
    query_kwargs = {'property_list': property_list,
                    'container_ref': container_ref,
                    'traversal_spec': traversal_spec,
                    'local_properties': local_properties}
    try:
        content = get_content(*query_args, **query_kwargs)
    except BadStatusLine:
        # The HTTP connection was dropped mid-response; retry once.
        content = get_content(*query_args, **query_kwargs)
    except IOError as exc:
        # Only a broken pipe is retried; any other I/O error is fatal.
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*query_args, **query_kwargs)

    object_list = []
    for obj in content:
        entry = {prop.name: prop.val for prop in obj.propSet}
        entry['object'] = obj.obj
        object_list.append(entry)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way (local properties only, no inventory traversal).

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises:
        salt.exceptions.VMwareApiError
            If the requested properties could not be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    # First fetch just the object's name so error/log messages are readable.
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        # Presumably some managed objects expose no 'name' property;
        # fall back to a placeholder used only for messages.
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Return the ``name`` property of a managed object, or None when the
    object has no name.

    mo_ref
        The managed object reference.
    '''
    properties = get_properties_of_managed_object(mo_ref, ['name'])
    return properties.get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device of the requested type.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or 'e1000e'.

    Raises ValueError for any other value.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        # Instantiate a fresh device object for the caller.
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Return the string type name of a virtual network adapter device.

    adapter_object
        The virtual network adapter device instance.

    Raises ValueError if the device is not a known adapter type.
    '''
    # The order is significant: the original code tests vmxnet2/vmxnet3
    # before vmxnet and e1000e before e1000, so more specific classes win.
    adapter_types = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for adapter_class, type_name in adapter_types:
        if isinstance(adapter_object, adapter_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Return the distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Return the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises salt.exceptions.VMwareObjectRetrievalError when the folder
    could not be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    # Follow only the datacenter's 'networkFolder' property.
    folder_traversal = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.Datacenter,
        path='networkFolder',
        skip=False)
    folders = get_mors_with_properties(
        get_service_instance_from_managed_object(dc_ref),
        vim.Folder,
        container_ref=dc_ref,
        property_list=['name'],
        traversal_spec=folder_traversal)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switches (DVS) in a datacenter.
    Blocks until the creation task completes.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a default spec carrying ``dvs_name``
        is built.

    Raises:
        salt.exceptions.VMwareApiError
            If the API call is denied (missing privilege) or fails.
        salt.exceptions.VMwareRuntimeError
            On a vmodl runtime fault.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    # The name is only filled in when we also create the config spec; a
    # caller-provided configSpec is assumed to carry its own name.
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.
    Blocks until the reconfiguration task completes.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.

    Raises:
        salt.exceptions.VMwareApiError
            If the API call is denied (missing privilege) or fails.
        salt.exceptions.VMwareRuntimeError
            On a vmodl runtime fault.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (network I/O control) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.

    Raises:
        salt.exceptions.VMwareApiError
            If the API call is denied (missing privilege) or fails.
        salt.exceptions.VMwareRuntimeError
            On a vmodl runtime fault.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Return distributed virtual portgroups (dvportgroups).

    parent_ref
        The parent object reference. Can be either a datacenter or a
        distributed virtual switch.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises salt.exceptions.ArgumentValueError when the parent is neither
    a datacenter nor a DVS.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Datacenter: walk networkFolder -> childEntity to the portgroups.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # DVS: the portgroups hang directly off the 'portgroup' property.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Return the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference.

    Raises salt.exceptions.VMwareObjectRetrievalError when no uplink
    portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        # The uplink portgroup is flagged by a system tag on the portgroup.
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            return entry['object']
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs). Blocks until the creation task completes.

    dvs_ref
        The dvs reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec).

    Raises:
        salt.exceptions.VMwareApiError
            If the API call is denied (missing privilege) or fails.
        salt.exceptions.VMwareRuntimeError
            On a vmodl runtime fault.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup.
    Blocks until the reconfiguration task completes.

    portgroup_ref
        The portgroup reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec) to apply.

    Raises:
        salt.exceptions.VMwareApiError
            If the API call is denied (missing privilege) or fails.
        salt.exceptions.VMwareRuntimeError
            On a vmodl runtime fault.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup.
    Blocks until the destroy task completes.

    portgroup_ref
        The portgroup reference.

    Raises:
        salt.exceptions.VMwareApiError
            If the API call is denied (missing privilege) or fails.
        salt.exceptions.VMwareRuntimeError
            On a vmodl runtime fault.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Return the standard-switch networks in a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicating whether to return all networks in the parent.
        Default is False.

    Raises salt.exceptions.ArgumentValueError when the parent is not a
    datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to the networks.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Return a simple list of object names for a given vim type.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The vim type of the objects to list.

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    return [entry['name'] for entry in
            get_mors_with_properties(service_instance, vim_object, properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises:
        salt.exceptions.VMwareApiError
            If the API call is denied (missing privilege) or fails.
        salt.exceptions.VMwareRuntimeError
            On a vmodl runtime fault.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises:
        salt.exceptions.VMwareApiError
            If the API call is denied (missing privilege) or fails.
        salt.exceptions.VMwareRuntimeError
            On a vmodl runtime fault.
        salt.exceptions.VMwareObjectRetrievalError
            If no license assignment manager is available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Raises:
        salt.exceptions.VMwareApiError
            If the API call is denied (missing privilege) or fails.
        salt.exceptions.VMwareRuntimeError
            On a vmodl runtime fault.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license and returns the added vim.LicenseManagerLicenseInfo.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Raises:
        salt.exceptions.VMwareApiError
            If the API call is denied (missing privilege) or fails.
        salt.exceptions.VMwareRuntimeError
            On a vmodl runtime fault.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label shown by the vSphere client.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises:
        salt.exceptions.ArgumentValueError
            If no entity_name was passed.
        salt.exceptions.VMwareApiError
            If an API call is denied (missing privilege) or fails.
        salt.exceptions.VMwareRuntimeError
            On a vmodl runtime fault.
        salt.exceptions.VMwareObjectRetrievalError
            If the assignment result is inconsistent (multiple vCenter
            assignments, or an assignment for a different vCenter).
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # A vCenter (uuid) query is expected to yield exactly one assignment.
    # Fixed typo in the log message ('Unexpectectedly' -> 'Unexpectedly').
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license object.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises:
        salt.exceptions.VMwareApiError
            If an API call is denied (missing privilege) or fails.
        salt.exceptions.VMwareRuntimeError
            On a vmodl runtime fault.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Added the missing log.exception calls below so these handlers
            # are consistent with every other fault handler in this module.
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Return the names of all datacenters of a service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Return datacenter managed objects of a vCenter, optionally filtered
    by name.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Return the vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name.

    Raises salt.exceptions.VMwareObjectRetrievalError when no datacenter
    with that name exists.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder and returns the new
    vim.Datacenter object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name

    Raises:
        salt.exceptions.VMwareApiError
            If the API call is denied (missing privilege) or fails.
        salt.exceptions.VMwareRuntimeError
            On a vmodl runtime fault.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Return a cluster (vim.ClusterComputeResource) from a datacenter.

    dc_ref
        The datacenter reference.

    cluster
        The name of the cluster to be retrieved.

    Raises salt.exceptions.VMwareObjectRetrievalError when no cluster
    with that name exists in the datacenter.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Traverse datacenter -> hostFolder -> childEntity to the clusters.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    matches = []
    for entry in get_mors_with_properties(si,
                                          vim.ClusterComputeResource,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if entry['name'] == cluster:
            matches.append(entry['object'])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.

    Raises:
        salt.exceptions.VMwareApiError
            If the API call is denied (missing privilege) or fails.
        salt.exceptions.VMwareRuntimeError
            On a vmodl runtime fault.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Reconfigure an existing cluster and block until the update task
    completes.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    # Thin delegation to list_objects for the cluster managed-object type.
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    # Datastore clusters are represented as vim.StoragePod objects.
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # Thin delegation to list_objects for the datastore managed-object type.
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dict of basic information for every datastore known to the
    service instance, keyed by datastore name. Each value contains:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError if no datastore with that name exists.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    # The str(...).replace("'", "") calls strip stray quote characters from
    # the stringified summary values.
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # capacity/freeSpace are reported in bytes; converted here to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # NOTE(review): would raise ZeroDivisionError if capacity is 0 — confirm
    # whether vCenter can report a zero-capacity datastore.
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies as "'vim.HostSystem:host-123'"; the moid is
        # the part after the first ':'
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the first matching managed object reference, or None if no
    object with that name exists.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # does not accumulate on the vCenter/ESXi side (resource leak fix).
        container.Destroy()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object

    Returns the first matching managed object reference, or None if no
    object with that moid exists.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # does not accumulate on the vCenter/ESXi side (resource leak fix).
        container.Destroy()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object,
                                       datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Kick off a server-side search of 'directory' on this datastore
            task = datobj.browser.SearchDatastore_Task(
                datastorePath='[{}] {}'.format(datobj.name, directory),
                searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            # Consistency fix: call wait_for_task directly, as the rest of
            # this module does, instead of via the salt.utils.vmware
            # self-reference.
            files.append(wait_for_task(task, directory,
                                       'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Datastores that don't contain the requested path are skipped
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.

    Raises ArgumentValueError when the backing disk filter is used with an
    unsupported reference type, or when the reference type itself is not
    supported.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        # The backing-disk filter requires host-level storage info, so it is
        # only valid when the reference is a host
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Merge the names derived from the disk filter into the name filter
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    # Apply the (possibly merged) name filter
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    current_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'",
              current_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used to retrieve the storage system.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host; retrieved from host_ref when not provided.

    Raises VMwareObjectRetrievalError if the storage system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    # NOTE(review): 'systemFile' appears to be a minimal property fetched
    # only to materialize the object — confirm it is the intended property.
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Return the vim.HostDiskPartitionInfo describing the partitions of a
    single device path.
    '''
    try:
        infos = storage_system.RetrieveDiskPartitionInfo(
            devicePath=[device_path])
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Only one device path was requested, so only the first entry matters
    log.trace('partition_info = %s', infos[0])
    return infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The host's storage system (vim.HostStorageSystem).

    device_path
        Path of the device to be partitioned.

    partition_info
        Existing vim.HostDiskPartitionInfo of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # BUGFIX: log.trace uses %-style lazy formatting; the previous '{0}'
    # placeholder was never substituted into the message.
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Return an ESXi host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises VMwareObjectRetrievalError when the datastore system cannot be
    retrieved.
    '''
    if hostname is None or not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return results[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore, using the datastore system of one of its attached
    hosts.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises VMwareApiError if the datastore has no attached hosts or on API
    faults, and VMwareRuntimeError on runtime faults.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # The removal is issued through the datastore system of the first
    # attached host
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None. If get_all_hosts is
        False and no host_names are given, an empty list is returned.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            # Keep only hosts whose parent is the requested cluster
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises VMwareObjectRetrievalError when the storage device, multipath, or
    lun information cannot be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    Returns an empty list when the host reports no scsi luns.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        # Defensive check; get_storage_system itself raises when nothing is
        # retrieved, so this branch is a belt-and-braces safeguard
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Build a map of all vim.ScsiLun objects on an ESXi host keyed by their
    scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # scsi address -> lun key
    lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        si, host_ref, storage_system, hostname)
    # lun key -> lun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Compose the two maps: scsi address -> lun object
    return {scsi_addr: lun_by_key[lun_key]
            for scsi_addr, lun_key in six.iteritems(lun_key_by_scsi_addr)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Without any filter there is nothing to retrieve
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError if the host has no devices or the
    requested disk cannot be found.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Find the scsi disk with the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError if the host's devices or the requested
    disk cannot be found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Find the scsi disk with the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.

    Raises VMwareObjectRetrievalError when the host has no vsan host config
    or no vsan storage info.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # Without a cache-disk filter there is nothing to retrieve
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # Each disk group is identified by its (single) ssd cache disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verify that a disk group's cache disk and capacity disks match the
    expected canonical names; raises ArgumentValueError on any mismatch.
    Returns True when the checks pass.
    '''
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    actual_capacity = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity = sorted(capacity_disk_ids)
    if actual_capacity != expected_capacity:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity, expected_capacity))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the
    specified host, otherwise returns None.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If
        None, it will be retrieved in the method.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if host_cache_manager:
        # A cache manager was supplied; query its properties directly.
        props = get_properties_of_managed_object(host_cache_manager,
                                                 ['cacheConfigurationInfo'])
        if not props:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return props['cacheConfigurationInfo'][0]
    # No cache manager supplied; traverse from the host to its
    # cacheConfigurationManager and fetch the configuration info.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.cacheConfigurationManager',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostCacheConfigurationManager,
                                       ['cacheConfigurationInfo'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries or not entries[0].get('cacheConfigurationInfo'):
        log.trace('Host \'%s\' has no host cache', hostname)
        return None
    return entries[0]['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache
        will be configured on.

    swap_size_MiB
        The size in mebibytes (MiB) of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If
        None, it will be retrieved in the method.

    Returns True on success.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        # Look up the host's cache configuration manager through its
        # configManager property.
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    # Launch the configuration task; translate pyVmomi faults into salt
    # exceptions for callers.
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the task finished.
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns every host visible to the given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    # Delegate to the generic lister for the HostSystem managed object type.
    object_type = vim.HostSystem
    return list_objects(service_instance, object_type)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects.

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        List of resource pool names to select; may be None/empty when
        ``get_all_resource_pools`` is True.

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        When True, return every resource pool found instead of filtering
        by name.

    return
        List of vim.ResourcePool managed object references

    Raises VMwareObjectRetrievalError when nothing matched.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search under the datacenter when one is given, otherwise under the
    # inventory root folder.
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = [pool['object'] for pool in resource_pools
                      if get_all_resource_pools or
                      pool['name'] in resource_pool_names]
    if not selected_pools:
        # Report the names that were requested; previously this formatted
        # selected_pools, which is always [] on this path.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(resource_pool_names,
                                                            get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns every resource pool visible to the given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    # Delegate to the generic lister for the ResourcePool type.
    object_type = vim.ResourcePool
    return list_objects(service_instance, object_type)
def list_networks(service_instance):
    '''
    Returns every network visible to the given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    # Delegate to the generic lister for the Network type.
    object_type = vim.Network
    return list_objects(service_instance, object_type)
def list_vms(service_instance):
    '''
    Returns every virtual machine visible to the given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    # Delegate to the generic lister for the VirtualMachine type.
    object_type = vim.VirtualMachine
    return list_objects(service_instance, object_type)
def list_folders(service_instance):
    '''
    Returns every folder visible to the given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    # Delegate to the generic lister for the Folder type.
    object_type = vim.Folder
    return list_objects(service_instance, object_type)
def list_dvs(service_instance):
    '''
    Returns every distributed virtual switch visible to the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain distributed
        virtual switches.
    '''
    # Delegate to the generic lister for the DistributedVirtualSwitch type.
    object_type = vim.DistributedVirtualSwitch
    return list_objects(service_instance, object_type)
def list_vapps(service_instance):
    '''
    Returns every vApp visible to the given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    # Delegate to the generic lister for the VirtualApp type.
    object_type = vim.VirtualApp
    return list_objects(service_instance, object_type)
def list_portgroups(service_instance):
    '''
    Returns every distributed virtual portgroup visible to the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed
        virtual portgroups.
    '''
    # Delegate to the generic lister for the DistributedVirtualPortgroup type.
    object_type = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, object_type)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

        NOTE: as implemented, this controls how often the "Waiting" message
        is logged; the loop itself sleeps to the next whole-second boundary
        on every iteration regardless of this value.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.

    Returns the task result on success; on failure, re-raises the stored
    task fault translated into the corresponding salt exception.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Initial read of the task state; faults raised while reading are
    # translated into salt exceptions.
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll until the task leaves the 'running'/'queued' states.
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary relative to start_time.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state
        # Re-raise the fault stored on the task so the except clauses below
        # can dispatch on its concrete type.
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                # Append the first detailed fault message when present.
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError when no VM matches and
    VMwareMultipleObjectsError when more than one matches.
    '''
    if datacenter and not parent_ref:
        # Restrict the search to the named datacenter.
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set covering hardware, storage and guest info.
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    # Filter on exact name match; the retrieval returns every VM under the
    # container.
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Fixed message: the two adjacent string literals previously
        # concatenated without a space, producing 'with thesame name'.
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    NOTE(review): if base_vm_name is None, placement has no 'folder' key
    and datacenter is falsy, folder_object is never bound and the final
    return raises NameError -- presumably callers always supply a
    datacenter; confirm.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Cloning case: reuse the parent folder of the base VM.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        # An explicit folder was requested in the placement dictionary.
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vmFolder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we
    would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Tuple of (resource pool object, placement object) where the
        placement object is the host or cluster, if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    # Treat a missing placement dict as empty so the explicit 'Placement is
    # not defined.' error below is raised instead of a TypeError from a
    # membership test on None.
    if not placement:
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The host does not expose resourcePool directly; traverse
            # through its parent compute resource instead.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: this message previously referenced placement['host'],
            # which does not exist in this branch (KeyError), and named the
            # wrong entity.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a dict with
    the size as an integer (vCenter requires a long value) and unit 'KB'.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    # Multipliers from each supported unit to KB (VMware treats GB as GiB).
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    normalized_unit = unit.lower()
    if normalized_unit not in multipliers:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    # vCenter needs long value
    target_size = int(size * multipliers[normalized_unit])
    return {'size': target_size, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    # Map each supported action to the vim method name and a task label.
    dispatch = {'on': ('PowerOn', 'power on'),
                'off': ('PowerOff', 'power off')}
    if action not in dispatch:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    method_name, task_name = dispatch[action]
    # Launch the power task, translating pyVmomi faults into salt
    # exceptions for callers.
    try:
        task = getattr(virtual_machine, method_name)()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Wait for completion; a missing file during the power operation is
    # surfaced as a dedicated power error.
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will ne placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Only pass a host when a valid vim.HostSystem was supplied; otherwise
    # let vCenter pick the placement within the resource pool.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until creation finishes and return the new VM reference.
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object
    '''
    # Build the registration arguments once; a target host is optional.
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A missing vmx file surfaces as a dedicated registration error.
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Applies a configuration spec to an existing virtual machine and
    returns the task result.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    # Kick off the reconfiguration task; translate pyVmomi faults into
    # salt exceptions so callers deal with one exception hierarchy.
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the task finished; return its result.
    return wait_for_task(task, vm_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine

    vm_ref
        Managed object reference of a virtual machine object
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', name)
    # Launch the destroy task, translating pyVmomi faults into salt
    # exceptions for callers.
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the destroy task completes.
    wait_for_task(destroy_task, name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters a virtual machine from the inventory (the VM's files are
    not deleted from the datastore).

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed copy-paste from delete_vm: this function unregisters, it does
    # not destroy.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log the fault before translating, consistent with every sibling
        # handler in this module.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_host_datastore_system
|
python
|
def get_host_datastore_system(host_ref, hostname=None):
'''
Returns a host's datastore system
host_ref
Reference to the ESXi host
hostname
Name of the host. This argument is optional.
'''
if not hostname:
hostname = get_managed_object_name(host_ref)
service_instance = get_service_instance_from_managed_object(host_ref)
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
path='configManager.datastoreSystem',
type=vim.HostSystem,
skip=False)
objs = get_mors_with_properties(service_instance,
vim.HostDatastoreSystem,
property_list=['datastore'],
container_ref=host_ref,
traversal_spec=traversal_spec)
if not objs:
raise salt.exceptions.VMwareObjectRetrievalError(
'Host\'s \'{0}\' datastore system was not retrieved'
''.format(hostname))
log.trace('[%s] Retrieved datastore system', hostname)
return objs[0]['object']
|
Returns a host's datastore system
host_ref
Reference to the ESXi host
hostname
Name of the host. This argument is optional.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2415-L2443
|
[
"def get_mors_with_properties(service_instance, object_type, property_list=None,\n container_ref=None, traversal_spec=None,\n local_properties=False):\n '''\n Returns a list containing properties and managed object references for the managed object.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n\n object_type\n The type of content for which to obtain managed object references.\n\n property_list\n An optional list of object properties used to return even more filtered managed object reference results.\n\n container_ref\n An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,\n ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory\n rootFolder.\n\n traversal_spec\n An optional TraversalSpec to be used instead of the standard\n ``Traverse All`` spec\n\n local_properties\n Flag specigying whether the properties to be retrieved are local to the\n container. If that is the case, the traversal spec needs to be None.\n '''\n # Get all the content\n content_args = [service_instance, object_type]\n content_kwargs = {'property_list': property_list,\n 'container_ref': container_ref,\n 'traversal_spec': traversal_spec,\n 'local_properties': local_properties}\n try:\n content = get_content(*content_args, **content_kwargs)\n except BadStatusLine:\n content = get_content(*content_args, **content_kwargs)\n except IOError as exc:\n if exc.errno != errno.EPIPE:\n raise exc\n content = get_content(*content_args, **content_kwargs)\n\n object_list = []\n for obj in content:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n properties['object'] = obj.obj\n object_list.append(properties)\n log.trace('Retrieved %s objects', len(object_list))\n return object_list\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n",
"def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):\n '''\n Retrieves the service instance from a managed object.\n\n me_ref\n Reference to a managed object (of type vim.ManagedEntity).\n\n name\n Name of managed object. This field is optional.\n '''\n if not name:\n name = mo_ref.name\n log.trace('[%s] Retrieving service instance from managed object', name)\n si = vim.ServiceInstance('ServiceInstance')\n si._stub = mo_ref._stub\n return si\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    # Guard clause: refuse to load when the pyVmomi import failed.
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    # NOTE(review): credentials below are spliced into a single shell
    # string; a password (or credstore path) containing a single quote will
    # break the quoting and could inject shell syntax. Consider a
    # list-based invocation with shell=False -- TODO confirm with callers.
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Proxying through vCenter: -h selects the target ESXi host.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    # output_loglevel='quiet' presumably keeps the credential-bearing
    # command line out of the logs -- confirm against cmdmod semantics.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        The vCenter server or ESX/ESXi host to connect to.

    username
        Username for the connection. Mandatory for the ``userpass`` mechanism.

    password
        Password for the connection. Mandatory for the ``userpass`` mechanism.

    protocol
        Connection protocol (e.g. ``https``).

    port
        Connection port (e.g. ``443``).

    mechanism
        Login mechanism: ``userpass`` or ``sspi``.

    principal
        Kerberos service principal. Mandatory for the ``sspi`` mechanism.

    domain
        Kerberos user domain. Mandatory for the ``sspi`` mechanism.

    Raises ``CommandExecutionError`` for invalid/missing login parameters and
    ``VMwareConnectionError`` when the connection cannot be established.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:  # pylint: disable=broad-except
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Bug fix: TypeError has no '.message' attribute on Python 3; build a
        # py2/py3-compatible message via six.text_type(). Also always re-raise:
        # previously a non-matching TypeError was silently swallowed, leaving
        # 'service_instance' undefined for the atexit registration below.
        exc_message = six.text_type(exc)
        if 'unexpected keyword argument' in exc_message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc_message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # Retry once with certificate verification disabled.
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: retry with a TLSv1 context that skips
                # certificate verification entirely.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:  # pylint: disable=broad-except
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is closed when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of customizing a clone

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Return the first view entry whose name matches, or None.
    matches = (entry for entry in container.view if entry.name == obj_name)
    return next(matches, None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # Reuse pyVim's cached (process-wide) service instance when possible.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
                (hasattr(stub, 'host') and
                 stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Session expired server-side: drop it and establish a fresh one.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    # Host portion only (strip the ':port' suffix).
    hostname = stub.host.split(':')[0]
    # Reuse the authenticated session cookie of the existing connection so the
    # new stub doesn't have to log in again; the cookie value is the quoted
    # token inside the Set-Cookie header string.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    me_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a ServiceInstance that shares the managed object's SOAP stub, so
    # it talks to the same endpoint/session.
    service_instance = vim.ServiceInstance('ServiceInstance')
    service_instance._stub = mo_ref._stub  # pylint: disable=protected-access
    return service_instance
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    # Most specific fault first; each is re-raised as a salt exception.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises a ``VMwareApiError`` for any other reported apiType.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' => vCenter Server; 'HostAgent' => standalone ESXi host.
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Returns the endpoint's ``AboutInfo`` object; API faults are translated to
    salt exceptions.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no switch matches
    '''
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    for candidate in container.view:
        if candidate.name == dvs_name:
            return candidate
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object
    '''
    # NOTE(review): this body is identical to _get_dvs_portgroup -- it matches
    # purely by name and does NOT verify that the portgroup is actually an
    # uplink portgroup. Confirm whether callers depend on this before changing.
    for portgroup in dvs.portgroup:
        if portgroup.name == portgroup_name:
            return portgroup
    return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Returns the base64-encoded token; raises ``ImportError`` when gssapi is
    unavailable and ``CommandExecutionError`` when no token can be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    # Service principal name in 'service/host@DOMAIN' form.
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            # On Python 3 the token must be converted to bytes before
            # base64-encoding.
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # 'in_token' is never reassigned in this loop, so reaching this point
        # (no out_token and context not established) always raises.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    Returns an empty dict when the endpoint is not a HostAgent (standalone
    ESXi host).

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        # A container view over all HostSystems; on a HostAgent endpoint the
        # first (only) entry is the host itself.
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; convert to MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # FQDN is 'host.domain', or just 'host' when there is no domain.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # NOTE(review): the container view is only dereferenced, never
        # Destroy()ed -- confirm whether the server-side view should be
        # destroyed explicitly here.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    API faults are translated to salt exceptions.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view (only created when we built our own traversal
    # spec above)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Get list of all managed object references with specified property
    candidates = get_mors_with_properties(service_instance, object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # Also accept a match against the stringified managed object
        # reference (surrounding quotes stripped).
        candidate_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if property_value in (candidate[property_name], candidate_id):
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Retrieve the content, retrying once when the HTTP connection was
    # dropped mid-request (BadStatusLine or a broken pipe).
    retrieval_args = (service_instance, object_type)
    retrieval_kwargs = {'property_list': property_list,
                        'container_ref': container_ref,
                        'traversal_spec': traversal_spec,
                        'local_properties': local_properties}
    try:
        content = get_content(*retrieval_args, **retrieval_kwargs)
    except BadStatusLine:
        content = get_content(*retrieval_args, **retrieval_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*retrieval_args, **retrieval_kwargs)
    # Flatten each result's propSet into a dict, keeping the managed object
    # reference itself under the 'object' key.
    object_list = []
    for obj in content:
        properties = {prop.name: prop.val for prop in obj.propSet}
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved optimally.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    mo_type_name = type(mo_ref).__name__
    log.trace('Retrieving name of %s', mo_type_name)
    # Fetch the object's name first; it is only used in log and error
    # messages.
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, mo_type_name, mo_name)
    entries = get_mors_with_properties(service_instance,
                                       type(mo_ref),
                                       container_ref=mo_ref,
                                       property_list=properties,
                                       local_properties=True)
    if not entries:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return entries[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.

    If the name wasn't found, it returns None.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new network adapter device object for the given type name.

    adapter_type
        The adapter type name ('vmxnet', 'vmxnet2', 'vmxnet3', 'e1000',
        'e1000e') from which to obtain the network adapter device.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name for a device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.
    '''
    # Order matters: vmxnet2/vmxnet3 are tested before the base vmxnet class,
    # and e1000e before e1000, matching the original isinstance chain.
    ordered_checks = [
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    ]
    for device_class, type_name in ordered_checks:
        if isinstance(adapter_object, device_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    entries = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualSwitch,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    dvs_refs = []
    for entry in entries:
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvs_refs.append(entry['object'])
    return dvs_refs
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises ``VMwareObjectRetrievalError`` when the folder cannot be found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Follow the datacenter's 'networkFolder' property directly.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete. Returns None.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the DVS
        name is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the server-side creation task finishes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the server-side reconfiguration task finishes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (Network I/O Control) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).

    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the dvss to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Datacenter parent: traverse networkFolder -> childEntity.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # DVS parent: follow the switch's own 'portgroup' property.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    entries = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualPortgroup,
                                       container_ref=parent_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    portgroup_refs = []
    for entry in entries:
        if get_all_portgroups or \
           (portgroup_names and entry['name'] in portgroup_names):
            portgroup_refs.append(entry['object'])
    return portgroup_refs
def get_uplink_dvportgroup(dvs_ref):
    '''
    Return the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference

    Raises a VMwareObjectRetrievalError when no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    uplink_pgs = []
    for pg_props in get_mors_with_properties(service_instance,
                                             vim.DistributedVirtualPortgroup,
                                             container_ref=dvs_ref,
                                             property_list=['tag'],
                                             traversal_spec=traversal_spec):
        # The uplink portgroup carries the SYSTEM/DVS.UPLINKPG tag
        tags = pg_props['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            uplink_pgs.append(pg_props['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs)

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults. Blocks until the creation task completes.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Wait on the async vCenter task so creation errors surface to the caller
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults. Blocks until the reconfigure task completes.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log-message typo: 'portgrouo' -> 'portgroup'
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Wait on the async vCenter task so reconfigure errors surface
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup

    portgroup_ref
        The portgroup reference

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults. Blocks until the destroy task completes.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Wait on the async vCenter task so destroy errors surface to the caller
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Return the standard switch networks found under a datacenter.

    parent_ref
        The parent object reference; must be a vim.Datacenter.

    network_names
        Names of the standard switch networks to return. Default is None.

    get_all_networks
        When True, return every network under the parent. Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks are reached through the datacenter's network folder
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Return a simple list of object names of the given type from a service
    instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The vim type of the objects to list.

    properties
        Optional list of object properties to retrieve.
        Defaults to ``['name']``.
    '''
    if properties is None:
        properties = ['name']
    return [entry['name'] for entry in
            get_mors_with_properties(service_instance, vim_object,
                                     properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises VMwareApiError on permission/API faults, VMwareRuntimeError on
    vmodl runtime faults, and VMwareObjectRetrievalError when the manager
    is not exposed by the service instance.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.

    Returns the added license (vim.LicenseManagerLicenseInfo).
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the vSphere client's label key/value
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ArgumentValueError when entity_name is missing, VMwareApiError /
    VMwareRuntimeError on API faults, and VMwareObjectRetrievalError when
    the vCenter assignment lookup returns unexpected results.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            # The vCenter itself is identified by its instance UUID
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        # Other entities (cluster, host, ...) are identified by their moid
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid) query is expected to return exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        # Sanity check: the assignment must belong to the requested vCenter
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Returns the assigned license. Raises VMwareApiError on permission/API
    faults and VMwareRuntimeError on vmodl runtime faults.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter: identified by its instance UUID
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before re-raising, consistent with every other handler
            # in this module (previously missing here)
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host: identified by its moid
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter, as vim.Datacenter objects.

    service_instance
        The Service Instance Object from which to obtain datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items
def get_datacenter(service_instance, datacenter_name):
    '''
    Return the vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name

    Raises VMwareObjectRetrievalError when no such datacenter exists.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder and returns the
    new vim.Datacenter object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Return a cluster (vim.ClusterComputeResource) from a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The name of the cluster to be retrieved

    Raises VMwareObjectRetrievalError when the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters are reached through the datacenter's host folder
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    matches = []
    for entry in get_mors_with_properties(si,
                                          vim.ClusterComputeResource,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if entry['name'] == cluster:
            matches.append(entry['object'])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        # CreateClusterEx is synchronous; no task to wait on
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults. Blocks until the reconfigure task completes.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing config instead of
        # replacing it
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster (storage pod) names associated with
    a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastore names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Return a mapping of datastore name to its basic information:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Sizes are reported in MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against ZeroDivisionError for datastores that report no capacity
    # (e.g. inaccessible/unmounted datastores)
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key renders like 'vim.HostSystem:host-123'; keep the moid part
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Return a reference to the first inventory object of the specified type
    with the given name, or None when no match exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    match = None
    for candidate in container.view:
        if candidate.name == obj_name:
            match = candidate
            break
    return match
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Return a reference to the first inventory object of the specified type
    with the given managed object ID, or None when no match exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    match = None
    for candidate in container.view:
        if candidate._moId == obj_moid:
            match = candidate
            break
    return match
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Search each datastore at '[<datastore>] <directory>'
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            # A missing directory on one datastore is not fatal; just skip it
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:

            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # Build a new list instead of extending in place, so the
            # caller's datastore_names argument is not mutated
            datastore_names = datastore_names + disk_datastores
        else:
            datastore_names = disk_datastores

    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []

    log.trace('datastore_names = %s', datastore_names)

    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        # RenameDatastore is synchronous; no task to wait on
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used for the property collection.

    host_ref
        Reference to the vim.HostSystem whose storage system is retrieved.

    hostname
        Host name used in logging/errors; looked up from host_ref if omitted.

    Raises VMwareObjectRetrievalError when the storage system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    # NOTE(review): 'systemFile' looks like a placeholder property used only
    # to force retrieval of the object itself — confirm intent
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition informations for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The vim.HostStorageSystem used to query the partition layout.

    device_path
        Path of the device whose partition info is retrieved.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    try:
        # API takes a list of device paths; we query a single one
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem used to compute the partition layout.

    device_path
        Path of the device on which the partition is added.

    partition_info
        Existing vim.HostDiskPartitionInfo of the device.

    Raises VMwareObjectNotFoundError when the disk has no free partition,
    VMwareApiError/VMwareRuntimeError on API faults, and
    VMwareNotFoundError when the new partition cannot be identified in the
    computed layout.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed: logging uses %-style lazy formatting; the previous '{0}'
    # placeholder was never substituted and triggered a logging format error
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore on a disk and returns the new
    vim.Datastore reference.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDisk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved from ``host_ref`` if
        not provided. Default is None.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    # Compute a partition spec that adds a vmfs partition in the disk's
    # free space; the new partition number identifies the datastore extent
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The removal is performed via the datastore system
    of the first host attached to the datastore.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host can perform the removal; use the first one
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    props = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    if datacenter_name:
        container = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # 'parent' is needed to check cluster membership below; a
            # cluster only makes sense within a specified datacenter
            props.append('parent')
    else:
        # No datacenter specified: search from the inventory root
        container = get_root_folder(service_instance)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostSystem,
                                       container_ref=container,
                                       property_list=props)
    log.trace('Retrieved hosts: %s', [e['name'] for e in entries])
    selected = []
    for entry in entries:
        if cluster_name:
            # Keep only hosts whose parent is the requested cluster
            parent = entry['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or entry['name'] in host_names:
            selected.append(entry['object'])
    return selected
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.ScsiLun objects on an ESXi host.
    Returns an empty list if the host reports no luns.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # scsi address -> lun key
    key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        si, host_ref, storage_system, hostname)
    # lun key -> lun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Join the two maps: scsi address -> lun object
    result = {}
    for scsi_addr, lun_key in six.iteritems(key_by_scsi_addr):
        result[scsi_addr] = lun_by_key[lun_key]
    return result
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # Nothing requested and not fetching everything
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns the vim.HostDiskPartitionInfo describing all partitions
    on a disk.

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Find the scsi disk matching the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''

    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Find the scsi disk matching the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their canonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # Nothing requested and not fetching everything
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # Each disk mapping (disk group) is identified by its single cache disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    # The disk group's single cache disk must match the expected id
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    # Compare capacity disks irrespective of ordering
    actual_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_ids = sorted(capacity_disk_ids)
    if actual_ids != expected_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_ids, expected_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Traverse from the host to its cache configuration manager
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first cache configuration entry is considered
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host.
    Returns True when the configuration task completes successfully.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache
        will be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    # Thin wrapper around list_objects for vim.HostSystem managed objects
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean

    return
        List of resource pool managed object references

    raises salt.exceptions.VMwareObjectRetrievalError when no matching
    resource pools are found
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Fixed: the error used to format the (always empty) result list
        # instead of the requested resource pool names
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))

    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    # Thin wrapper around list_objects for vim.ResourcePool managed objects
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    # Thin wrapper around list_objects for vim.Network managed objects
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    # Thin wrapper around list_objects for vim.VirtualMachine managed objects
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    # Thin wrapper around list_objects for vim.Folder managed objects
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    # Thin wrapper around list_objects for vim.DistributedVirtualSwitch
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    # Thin wrapper around list_objects for vim.VirtualApp managed objects
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    # Thin wrapper around list_objects for vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed. Returns the task's result on success;
    on failure the task's error is re-raised and translated into the
    corresponding salt VMware exception.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Log progress only every 'sleep_seconds' iterations
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary since start_time
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; raise the stored fault and translate it
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                # Append the first detailed fault message, when present
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    raises salt.exceptions.VMwareObjectRetrievalError when no VM matches,
    salt.exceptions.VMwareMultipleObjectsError when more than one matches
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set covering hardware, storage, guest and runtime
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Fixed: the two adjacent literals previously concatenated into
        # "with thesame name" (missing list separator)
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    raises salt.exceptions.ArgumentValueError when no folder can be derived
    from the given arguments
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Use the parent folder of the base (clone source) VM
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default VM folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Fixed: previously fell through with folder_object undefined,
        # producing a NameError instead of a meaningful exception
        raise salt.exceptions.ArgumentValueError(
            'Unable to retrieve folder: no base VM, placement folder or '
            'datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if placement is None:
        # Fixed: a None placement used to raise TypeError on the
        # membership tests below; treat it as an empty placement instead,
        # which falls through to the explicit error at the end
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose their resource pool via the parent
            # compute resource; traverse to it explicitly
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: the message used to reference placement['host'], which
            # may not exist in this branch (KeyError while raising) and
            # named the wrong entity
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit.

    unit
        Unit of the size: ``GB``, ``MB`` or ``KB`` (case insensitive).
        Note: to VMware a GB is the same as GiB = 1024MiB.

    size
        Number which represents the size.

    Returns a dict of the form ``{'size': <int>, 'unit': 'KB'}`` (the
    previous docstring incorrectly claimed a bare integer was returned).

    Raises ``salt.exceptions.ArgumentValueError`` for unsupported units.
    '''
    # Normalize once instead of lower-casing in every branch.
    normalized_unit = unit.lower()
    if normalized_unit == 'gb':
        # vCenter needs long value
        target_size = int(size * 1024 * 1024)
    elif normalized_unit == 'mb':
        target_size = int(size * 1024)
    elif normalized_unit == 'kb':
        target_size = int(size)
    else:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': target_size, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by its name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine

    Raises ``ArgumentValueError`` for unknown actions, ``VMwareApiError`` /
    ``VMwareRuntimeError`` on API faults and ``VMwarePowerOnError`` when a
    required file is missing.
    '''
    # Select the vim method up front so the (previously duplicated)
    # exception handling only has to be written once. Unknown actions are
    # rejected before any API call is made, as before.
    if action == 'on':
        power_method = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_method = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_method()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Only pin the VM to a specific host when a valid HostSystem was given;
    # otherwise let the resource pool decide placement.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host
        Placement host of the virtual machine, vim.HostSystem object
    '''
    # Assemble the call arguments once; the placement host is optional.
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # The task result replaces the original reference on success.
    return wait_for_task(reconfig_task, vm_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the destroy task finishes (raises on task failure).
    wait_for_task(destroy_task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters a virtual machine from the inventory. Unlike delete_vm this
    does not destroy the VM's files on disk. (The previous docstring and log
    message incorrectly said "Destroying".)

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, consistent with every other handler in
        # this module (the original silently dropped the traceback here).
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
remove_datastore
|
python
|
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes the referenced datastore via one of its attached hosts.
    (Previous docstring was copied from the datastore-creation helper
    and was inaccurate.)

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    # Removal goes through a host's datastore system, so at least one
    # attached host is required.
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host can perform the removal; the first is used.
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
|
Removes the referenced datastore via one of its attached hosts
service_instance
The Service Instance Object containing the datastore
datastore_ref
The reference to the datastore to remove
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2446-L2481
|
[
"def get_properties_of_managed_object(mo_ref, properties):\n '''\n Returns specific properties of a managed object, retrieved in an\n optimally.\n\n mo_ref\n The managed object reference.\n\n properties\n List of properties of the managed object to retrieve.\n '''\n service_instance = get_service_instance_from_managed_object(mo_ref)\n log.trace('Retrieving name of %s', type(mo_ref).__name__)\n try:\n items = get_mors_with_properties(service_instance,\n type(mo_ref),\n container_ref=mo_ref,\n property_list=['name'],\n local_properties=True)\n mo_name = items[0]['name']\n except vmodl.query.InvalidProperty:\n mo_name = '<unnamed>'\n log.trace('Retrieving properties \\'%s\\' of %s \\'%s\\'',\n properties, type(mo_ref).__name__, mo_name)\n items = get_mors_with_properties(service_instance,\n type(mo_ref),\n container_ref=mo_ref,\n property_list=properties,\n local_properties=True)\n if not items:\n raise salt.exceptions.VMwareApiError(\n 'Properties of managed object \\'{0}\\' weren\\'t '\n 'retrieved'.format(mo_name))\n return items[0]\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n",
"def get_host_datastore_system(host_ref, hostname=None):\n '''\n Returns a host's datastore system\n\n host_ref\n Reference to the ESXi host\n\n hostname\n Name of the host. This argument is optional.\n '''\n\n if not hostname:\n hostname = get_managed_object_name(host_ref)\n service_instance = get_service_instance_from_managed_object(host_ref)\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n path='configManager.datastoreSystem',\n type=vim.HostSystem,\n skip=False)\n objs = get_mors_with_properties(service_instance,\n vim.HostDatastoreSystem,\n property_list=['datastore'],\n container_ref=host_ref,\n traversal_spec=traversal_spec)\n if not objs:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Host\\'s \\'{0}\\' datastore system was not retrieved'\n ''.format(hostname))\n log.trace('[%s] Retrieved datastore system', hostname)\n return objs[0]['object']\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    # Guard clause: bail out with the standard (reason, message) tuple when
    # the pyVmomi import at module load time failed.
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
    ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary as produced by ``cmd.run_all`` (``retcode``,
        ``stdout``, ``stderr``, ...), or ``False`` if the esxcli binary
        is not on the PATH.
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    # NOTE(review): the credentials and command are interpolated into a
    # single shell command string; a password or cmd containing single
    # quotes will break (or alter) the resulting command line. Consider
    # passing an argument list with shell=False instead.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Supports the ``userpass`` and ``sspi`` (Kerberos) mechanisms; SSL
    verification failures are retried with an unverified context.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # BUGFIX: 'exc.message' only exists on Python 2 and raised
        # AttributeError on Python 3; use six.text_type(exc) instead.
        if 'unexpected keyword argument' in six.text_type(exc):
            log.error('Initial connect to the VMware endpoint failed with %s', six.text_type(exc))
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # Retry once with certificate verification disabled.
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: force TLSv1 with verification disabled.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is closed when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of customizing a clone

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Use a dedicated local instead of rebinding the parameter name.
    spec_ref = si.content.customizationSpecManager.GetCustomizationSpec(
        name=customization_spec_name)
    return spec_ref
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the matching managed object, or None if no object matches.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Destroy the server-side view; the original leaked one view object
        # on the vCenter/ESXi side per call.
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    Reuses pyVim's cached service instance when possible; otherwise connects
    via _get_service_instance and verifies the session is still live.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # GetSi() returns pyVim's process-wide cached service instance (or None).
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and establish a fresh one with the same
        # credentials.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    # Reach into the existing connection's stub to reuse its endpoint and
    # authenticated session cookie for the new stub.
    stub = service_instance._stub
    # stub.host is 'hostname:port'; keep only the hostname.
    hostname = stub.host.split(':')[0]
    # stub.cookie looks like 'vmware_soap_session="<id>"; ...' -- the split
    # extracts the quoted session id.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    # NOTE(review): the default '<unnamed>' is truthy, so this branch only
    # fires when the caller explicitly passes a falsy name.
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    si = vim.ServiceInstance('ServiceInstance')
    # Share the managed object's stub so the new ServiceInstance talks to
    # the same endpoint over the same authenticated session.
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises VMwareApiError on vim faults and VMwareRuntimeError on vmodl
    runtime faults.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' -> vCenter, 'HostAgent' -> ESXi; anything else is
    # rejected as unsupported.
    known_api_types = {'VirtualCenter': True, 'HostAgent': False}
    if api_type not in known_api_types:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
    return known_api_types[api_type]
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Returns the endpoint's AboutInfo object; raises VMwareApiError /
    VMwareRuntimeError on API faults.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object
    '''
    # Bail out early when the name is not a known DVS.
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    return next((item for item in container.view if item.name == dvs_name),
                None)
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object
    '''
    # NOTE(review): this is byte-identical to _get_dvs_portgroup -- despite
    # the name it matches any portgroup by name and does not restrict the
    # search to uplink portgroups. Confirm whether callers rely on that
    # before deduplicating or tightening the filter.
    for portgroup in dvs.portgroup:
        if portgroup.name == portgroup_name:
            return portgroup
    return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    Returns the base64-encoded token; raises ImportError when the gssapi
    library is unavailable and CommandExecutionError when no token could be
    obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    # Kerberos service principal of the form principal/host@DOMAIN.
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # NOTE(review): in_token is never reassigned, so every step() call
        # passes None and the loop effectively performs a single exchange;
        # a step that yields no token raises below on the first iteration.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only direct ESXi connections ('HostAgent') expose a single host's
    # hardware; vCenter connections return an empty dict.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the (single) HostSystem of the ESXi host.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grains report MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # VMkernel NICs provide the IP/MAC interface grains.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is 'host.domain', or just 'host' when no domain is set.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NICs contribute MAC addresses only.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises ``VMwareApiError`` for permission/API faults and
    ``VMwareRuntimeError`` for vmodl runtime faults.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view (only if we created it above; a caller-supplied
    # traversal spec means we never built a container view)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference whose ``property_name``
    equals ``property_value``, or whose stringified moref matches it.
    Returns None when nothing matches.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match against.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    entries = get_mors_with_properties(service_instance, object_type,
                                       property_list=[property_name],
                                       container_ref=container_ref)
    for entry in entries:
        # The moref's repr is quoted; strip quotes so raw moids also match.
        moid = six.text_type(entry.get('object', '')).strip('\'"')
        if property_value in (entry[property_name], moid):
            return entry['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    Each list element is a dict of the requested property values plus an
    ``object`` key holding the managed object reference itself.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # Stale/closed HTTP connection; retry the call once.
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        # Only a broken pipe warrants a retry; everything else propagates.
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)

    # Flatten each result's propSet into a plain dict, keeping the moref
    # under the 'object' key.
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises ``VMwareApiError`` if the properties could not be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    mo_type_name = type(mo_ref).__name__
    log.trace('Retrieving name of %s', mo_type_name)
    # Fetch the object's name first so trace/error messages can refer to it.
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, mo_type_name, mo_name)
    prop_entries = get_mors_with_properties(service_instance,
                                            type(mo_ref),
                                            container_ref=mo_ref,
                                            property_list=properties,
                                            local_properties=True)
    if not prop_entries:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return prop_entries[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.

    If the name wasn't found, it returns None.

    mo_ref
        The managed object reference.
    '''
    properties = get_properties_of_managed_object(mo_ref, ['name'])
    return properties.get('name', None)
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device object for the given type
    name.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e'.

    Raises ``ValueError`` for any other type name.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    adapter_cls = adapter_classes.get(adapter_type)
    if adapter_cls is None:
        raise ValueError('An unknown network adapter object type name.')
    return adapter_cls()
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name for an adapter device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ``ValueError`` if the object matches none of the known types.
    '''
    # Check order mirrors the original if-chain: more specific device
    # classes are tested before vmxnet/e1000.
    ordered_checks = [
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    ]
    for adapter_cls, type_name in ordered_checks:
        if isinstance(adapter_object, adapter_cls):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Walk Datacenter -> networkFolder -> childEntity to reach the DVSs.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises ``VMwareObjectRetrievalError`` if the folder isn't found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    results = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return results[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits
    for the creation task to complete.

    NOTE: despite earlier documentation, the new DVS reference is NOT
    returned (the function returns None); retrieve it afterwards with
    ``get_dvss``.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the
        name is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the creation task as finished.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and waits
    for the reconfiguration task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfiguration task as finished.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (network I/O control) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).

    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the dvportgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Datacenter parent: walk networkFolder -> childEntity.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # DVS parent: its 'portgroup' property lists the portgroups directly.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference

    Raises ``VMwareObjectRetrievalError`` if no uplink portgroup exists.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        # The uplink portgroup is tagged with SYSTEM/DVS.UPLINKPG.
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            return entry['object']
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the creation task as finished.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log-message typo: 'portgrouo' -> 'portgroup'.
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfiguration task as finished.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the destroy task as finished.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.

    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Walk Datacenter -> networkFolder -> childEntity to reach the networks.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    return [entry['name']
            for entry in get_mors_with_properties(service_instance,
                                                  vim_object,
                                                  properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises ``VMwareObjectRetrievalError`` if the assignment manager is not
    available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license. Returns the license info object for the added license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is carried as a VpxClientLicenseLabel key/value label.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required (an ``ArgumentValueError`` is
        raised when missing).
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # vCenter itself: query by its instance UUID instead of a moid, and
        # later verify the returned assignment really belongs to entity_name.
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid) query must yield exactly one assignment.
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity. Returns the license info object of the
    assignment.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Consistency fix: log before re-raising, like every other
            # VimFault handler in this module.
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            # Consistency fix: log before re-raising.
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    datacenter_names = list_objects(service_instance, vim.Datacenter)
    return datacenter_names
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name

    Raises ``VMwareObjectRetrievalError`` if the datacenter doesn't exist.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if matches:
        return matches[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder and returns its
    managed object reference.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved

    Raises ``VMwareObjectRetrievalError`` if the cluster isn't found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Walk Datacenter -> hostFolder -> childEntity to reach the clusters.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    for entry in get_mors_with_properties(si,
                                          vim.ClusterComputeResource,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if entry['name'] == cluster:
            return entry['object']
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Cluster \'{0}\' was not found in datacenter '
        '\'{1}\''. format(cluster, dc_name))
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster under a datacenter's host folder.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    datacenter_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, datacenter_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Reconfigures an existing cluster and blocks until the reconfiguration
    task completes.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True applies the spec as an incremental change
        reconfig_task = cluster_ref.ReconfigureComputeResource_Task(
            cluster_spec, modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(reconfig_task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    clusters = list_objects(service_instance, vim.ClusterComputeResource)
    return clusters
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore clusters associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    datastore_clusters = list_objects(service_instance, vim.StoragePod)
    return datastore_clusters
def list_datastores(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastores = list_objects(service_instance, vim.Datastore)
    return datastores
def list_datastores_full(service_instance):
    '''
    Returns a mapping of datastore name to its basic information:
    name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {name: list_datastore_full(service_instance, name)
            for name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    # Summary values are str()-ed and stripped of quote characters so they
    # can be safely embedded in output further up the stack.
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # The API reports sizes in bytes; convert to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against a zero-capacity datastore (e.g. unmounted/inaccessible)
    # which would otherwise raise ZeroDivisionError
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies to something like "'vim.HostSystem:host-42'";
        # strip the quotes and keep the moid part after the colon
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the managed object reference, or None if no object with that
    name was found.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
    finally:
        # Destroy the view when done; otherwise each call leaks a
        # ContainerView session object on the vCenter/ESXi server.
        container.Destroy()
    return None
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object

    Returns the managed object reference, or None if no object with that
    moid was found.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
    finally:
        # Destroy the view when done; otherwise each call leaks a
        # ContainerView session object on the vCenter/ESXi server.
        container.Destroy()
    return None
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Searches a directory on each of the given datastores with a
    datastore-browser spec and collects the results.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    results = []
    datastore_refs = get_datastores(service_instance, container_object,
                                    datastore_names=datastores)
    for datastore_ref in datastore_refs:
        search_path = '[{}] {}'.format(datastore_ref.name, directory)
        try:
            task = datastore_ref.browser.SearchDatastore_Task(
                datastorePath=search_path, searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            results.append(salt.utils.vmware.wait_for_task(
                task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore with no matching files is simply skipped
            pass
    return results
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.
        The list is not modified by this function.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # Build a new list rather than extending in place, so the
            # caller's datastore_names argument is not mutated as a side
            # effect of this call
            datastore_names = datastore_names + disk_datastores
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    current_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", current_name,
              new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns the vim.HostStorageSystem of an ESXi host.

    service_instance
        The Service Instance from which to obtain the storage system.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host; looked up from ``host_ref`` when not given.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Follow the host's configManager.storageSystem property to reach the
    # storage system managed object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return entries[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns the partition information (vim.HostDiskPartitionInfo) for a
    single device path.
    '''
    try:
        infos = storage_system.RetrieveDiskPartitionInfo(
            devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A single device path was passed in, so the answer is the first entry
    partition_info = infos[0]
    log.trace('partition_info = %s', partition_info)
    return partition_info
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed: the message previously used a '{0}' str.format placeholder with
    # logging's %-style lazy arguments, so the value was never interpolated
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Optional; retrieved from the host
        when not provided.

    Returns the vim.Datastore reference of the newly created datastore.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a spec that adds a vmfs partition covering the disk's
    # remaining free space
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns the vim.HostDatastoreSystem of an ESXi host.

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Follow the host's configManager.datastoreSystem property to reach the
    # datastore system managed object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return entries[0]['object']
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            # The 'parent' property is needed below to check each host's
            # cluster membership
            properties.append('parent')
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            # Keep only hosts whose direct parent is the requested cluster
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # Each multipath lun can expose multiple paths; every path maps
        # back to the same lun key.
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns the list of vim.ScsiLun objects available on an ESXi host.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    luns = device_info.scsiLun
    if not luns:
        log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
        return []
    log.trace('Retrieved scsi luns in host \'%s\': %s',
              hostname, [lun.canonicalName for lun in luns])
    return luns
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref,
                                                                name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # First map each scsi address to the key of the lun behind it, then
    # resolve those keys against the full lun list
    lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        service_instance, host_ref, storage_system, hostname)
    lun_by_key = {lun.key: lun for lun in
                  get_all_luns(host_ref, storage_system, hostname)}
    return {scsi_addr: lun_by_key[lun_key] for scsi_addr, lun_key in
            six.iteritems(lun_key_by_scsi_addr)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            return []
    service_instance = get_service_instance_from_managed_object(host_ref,
                                                                name=hostname)
    storage_system = get_storage_system(service_instance, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # Translate the requested scsi addresses into lun keys so disks can
        # also be matched by key
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
            service_instance, host_ref, storage_system, hostname)
        disk_keys = [lun_key for scsi_addr, lun_key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    matching_disks = []
    for lun in get_all_luns(host_ref, storage_system):
        if not isinstance(lun, vim.HostScsiDisk):
            continue
        if (get_all_disks or
                # Filter by canonical name
                (disk_ids and lun.canonicalName in disk_ids) or
                # Filter by disk keys from scsi addresses
                lun.key in disk_keys):
            matching_disks.append(lun)
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in matching_disks])
    return matching_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.

    Returns a vim.HostDiskPartitionInfo object for the disk.
    Raises VMwareObjectRetrievalError if the host has no devices or the
    disk is not found.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the scsi disk with the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk

    in a vcenter filtered by their names and/or datacenter, cluster membership

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    # Retrieve the device list via the property collector to find the
    # disk's device path
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the scsi disk with the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (single) cache disk (ssd)
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that a disk group contains exactly the expected cache disk and
    capacity disks.

    Raises ArgumentValueError when either the cache disk id or the set of
    capacity disk ids does not match; returns True on success.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # Order is irrelevant; compare the sorted canonical names.
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the first host cache configuration entry if the host cache is
    configured on the specified host, otherwise returns None.

    NOTE(review): the original docstring claimed a vim.HostScsiDisk is
    returned; the code actually returns ``cacheConfigurationInfo[0]`` --
    confirm the exact vim type against callers.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Traverse from the host to its cache configuration manager and fetch
        # the cacheConfigurationInfo property in a single retrieval.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first entry is returned (single-datastore assumption; see
        # the module-level TODO about multiple datastores).
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache
        will be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method

    Returns True on success; raises VMwareObjectRetrievalError when the host
    has no cache configuration manager, VMwareApiError / VMwareRuntimeError on
    API faults.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        # Look the manager up from the host's configManager when the caller
        # did not supply one.
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the configuration task completes (raises on task failure).
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Enumerates every host reachable from the given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    managed_type = vim.HostSystem
    return list_objects(service_instance, managed_type)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean

    return
        Resourcepool managed object reference

    Raises VMwareObjectRetrievalError when no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search under the datacenter when given, otherwise under the root folder.
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the requested names, not the (always empty) result list,
        # so the error message actually identifies what was looked for.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))

    return selected_pools
def list_resourcepools(service_instance):
    '''
    Enumerates every resource pool reachable from the given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    managed_type = vim.ResourcePool
    return list_objects(service_instance, managed_type)
def list_networks(service_instance):
    '''
    Enumerates every network reachable from the given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    managed_type = vim.Network
    return list_objects(service_instance, managed_type)
def list_vms(service_instance):
    '''
    Enumerates every virtual machine reachable from the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    managed_type = vim.VirtualMachine
    return list_objects(service_instance, managed_type)
def list_folders(service_instance):
    '''
    Enumerates every folder reachable from the given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    managed_type = vim.Folder
    return list_objects(service_instance, managed_type)
def list_dvs(service_instance):
    '''
    Enumerates every distributed virtual switch reachable from the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        switches.
    '''
    managed_type = vim.DistributedVirtualSwitch
    return list_objects(service_instance, managed_type)
def list_vapps(service_instance):
    '''
    Enumerates every vApp reachable from the given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    managed_type = vim.VirtualApp
    return list_objects(service_instance, managed_type)
def list_portgroups(service_instance):
    '''
    Enumerates every distributed virtual portgroup reachable from the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        portgroups.
    '''
    managed_type = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, managed_type)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    On success returns the task result; on failure re-raises the task's error
    translated into the salt VMware exception hierarchy.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.
        NOTE(review): the poll interval is actually fixed at ~1s by the
        sleep below; this value only controls how often the waiting
        message is logged -- confirm with callers before changing.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Log only every sleep_seconds iterations to avoid log spam.
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary relative to start_time,
        # keeping the polling cadence aligned at ~1 poll per second.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault so it can be
        # translated into the appropriate salt exception below
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first fault message detail when available.
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError when no machine matches ``name`` and
    VMwareMultipleObjectsError when several do.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set covering the attributes most callers report on.
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Fixed: the original implicit string concatenation produced
        # "...with thesame name..." (missing space).
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the '
            'same name, please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises VMwareObjectRetrievalError when the folder cannot be determined
    and VMwareMultipleObjectsError when a folder name is ambiguous.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Cloning: place the new VM in the same folder as the base VM.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this fell through and raised UnboundLocalError on the
        # return statement; raise a meaningful error instead.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The folder could not be determined: no base virtual machine, '
            'placement folder or datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied,
    we would like to use the strictest as possible.

    service_instance
        Service instance object to access vCenter

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    # Guard against placement=None so the ``in`` tests below do not raise
    # TypeError; an empty placement falls through to the final error branch.
    if not placement:
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                    get_properties_of_managed_object(host_objects[0],
                                                     properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose the pool via their parent compute
            # resource; traverse to it explicitly.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: the message previously referenced placement['host'],
            # which is absent in this branch (KeyError) and was the wrong
            # object anyway.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a dict with an
    integer size.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    Raises ArgumentValueError for an unrecognized unit.
    '''
    # vCenter needs a (long) integer value, hence the int() conversion.
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    factor = multipliers.get(unit.lower())
    if factor is None:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': int(size * factor), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by its name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine; must be ``'on'`` or
        ``'off'``, anything else raises ArgumentValueError.

    Returns the virtual machine object after the power task has completed.
    Raises VMwareApiError / VMwareRuntimeError on API faults and
    VMwarePowerOnError when the task fails with a missing file.
    '''
    if action == 'on':
        try:
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    elif action == 'off':
        try:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        # Block until the power task completes; a FileNotFound during the
        # task (e.g. missing VM files) is surfaced as a power-on error.
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from a config spec and waits for the creation
    task to finish.

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    create_kwargs = {'pool': resourcepool_object}
    # Only pin the VM to a host when a valid HostSystem reference was given.
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file; on
    success it returns the vim.VirtualMachine managed object reference.

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object
    '''
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    # Pin the registered VM to a specific host only when one was supplied.
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Applies a config spec to an existing virtual machine and waits for the
    reconfiguration task to complete.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(reconfig_task, name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine and waits for the destroy task to finish.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory (the VM's files stay
    on the datastore; use delete_vm to destroy them).

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed: the previous message said 'Destroying', which is delete_vm's job.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, consistent with the other API wrappers.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_hosts
|
python
|
def get_hosts(service_instance, datacenter_name=None, host_names=None,
cluster_name=None, get_all_hosts=False):
'''
Returns a list of vim.HostSystem objects representing ESXi hosts
in a vcenter filtered by their names and/or datacenter, cluster membership.
service_instance
The Service Instance Object from which to obtain the hosts.
datacenter_name
The datacenter name. Default is None.
host_names
The host_names to be retrieved. Default is None.
cluster_name
The cluster name - used to restrict the hosts retrieved. Only used if
the datacenter is set. This argument is optional.
get_all_hosts
Specifies whether to retrieve all hosts in the container.
Default value is False.
'''
properties = ['name']
if cluster_name and not datacenter_name:
raise salt.exceptions.ArgumentValueError(
'Must specify the datacenter when specifying the cluster')
if not host_names:
host_names = []
if not datacenter_name:
# Assume the root folder is the starting point
start_point = get_root_folder(service_instance)
else:
start_point = get_datacenter(service_instance, datacenter_name)
if cluster_name:
# Retrieval to test if cluster exists. Cluster existence only makes
# sense if the datacenter has been specified
properties.append('parent')
# Search for the objects
hosts = get_mors_with_properties(service_instance,
vim.HostSystem,
container_ref=start_point,
property_list=properties)
log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
filtered_hosts = []
for h in hosts:
# Complex conditions checking if a host should be added to the
# filtered list (either due to its name and/or cluster membership)
if cluster_name:
if not isinstance(h['parent'], vim.ClusterComputeResource):
continue
parent_name = get_managed_object_name(h['parent'])
if parent_name != cluster_name:
continue
if get_all_hosts:
filtered_hosts.append(h['object'])
continue
if h['name'] in host_names:
filtered_hosts.append(h['object'])
return filtered_hosts
|
Returns a list of vim.HostSystem objects representing ESXi hosts
in a vcenter filtered by their names and/or datacenter, cluster membership.
service_instance
The Service Instance Object from which to obtain the hosts.
datacenter_name
The datacenter name. Default is None.
host_names
The host_names to be retrieved. Default is None.
cluster_name
The cluster name - used to restrict the hosts retrieved. Only used if
the datacenter is set. This argument is optional.
get_all_hosts
Specifies whether to retrieve all hosts in the container.
Default value is False.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2484-L2547
|
[
"def get_datacenter(service_instance, datacenter_name):\n '''\n Returns a vim.Datacenter managed object.\n\n service_instance\n The Service Instance Object from which to obtain datacenter.\n\n datacenter_name\n The datacenter name\n '''\n items = get_datacenters(service_instance,\n datacenter_names=[datacenter_name])\n if not items:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Datacenter \\'{0}\\' was not found'.format(datacenter_name))\n return items[0]\n",
"def get_mors_with_properties(service_instance, object_type, property_list=None,\n container_ref=None, traversal_spec=None,\n local_properties=False):\n '''\n Returns a list containing properties and managed object references for the managed object.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n\n object_type\n The type of content for which to obtain managed object references.\n\n property_list\n An optional list of object properties used to return even more filtered managed object reference results.\n\n container_ref\n An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,\n ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory\n rootFolder.\n\n traversal_spec\n An optional TraversalSpec to be used instead of the standard\n ``Traverse All`` spec\n\n local_properties\n Flag specigying whether the properties to be retrieved are local to the\n container. If that is the case, the traversal spec needs to be None.\n '''\n # Get all the content\n content_args = [service_instance, object_type]\n content_kwargs = {'property_list': property_list,\n 'container_ref': container_ref,\n 'traversal_spec': traversal_spec,\n 'local_properties': local_properties}\n try:\n content = get_content(*content_args, **content_kwargs)\n except BadStatusLine:\n content = get_content(*content_args, **content_kwargs)\n except IOError as exc:\n if exc.errno != errno.EPIPE:\n raise exc\n content = get_content(*content_args, **content_kwargs)\n\n object_list = []\n for obj in content:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n properties['object'] = obj.obj\n object_list.append(properties)\n log.trace('Retrieved %s objects', len(object_list))\n return object_list\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n",
"def get_root_folder(service_instance):\n '''\n Returns the root folder of a vCenter.\n\n service_instance\n The Service Instance Object for which to obtain the root folder.\n '''\n try:\n log.trace('Retrieving root folder')\n return service_instance.RetrieveContent().rootFolder\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Declare this utils module loadable only when the pyVmomi dependency is
    importable; otherwise hand the loader a (False, reason) tuple.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port (defaults to 443)
    :param protocol: Connection protocol (defaults to https)
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: Dictionary as produced by cmdmod.run_all (retcode/stdout/stderr),
             or False when the esxcli binary is not on the PATH
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    # NOTE(review): the password is interpolated into the shell command line;
    # output_loglevel='quiet' below keeps it out of the logs, but it is still
    # visible in the process list while the command runs.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Going through a vCenter: -h selects the ESXi host to act on.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    mechanism must be either ``userpass`` (username/password login) or
    ``sspi`` (Kerberos, via a GSSAPI token built from principal/host/domain).

    Raises CommandExecutionError for missing or unsupported parameters and
    VMwareConnectionError when the connection cannot be established.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Old pyVmomi releases don't accept the b64token/mechanism kwargs.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First fallback: on certificate verification failures retry
            # with an unverified SSL context.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: force TLSv1 with certificate checking disabled.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is logged out when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Look up a VMware guest customization specification by name, for the
    purposes of customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec

    Returns the item reported by the CustomizationSpecManager for that name.
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get a reference to an object of specified object type and name.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the first matching managed object reference, or None when no
    object of that type carries the given name.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # doesn't accumulate on the vCenter/ESXi endpoint.
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Reuse pyVim's cached service instance when it targets the same host.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Session expired server-side; drop it and log in again.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    # Reuse the already-authenticated session: pull the session cookie out of
    # the existing stub's Cookie header and advertise it via the request
    # context so the new stub is authenticated too.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional. Used only for
        logging; falls back to ``mo_ref.name`` when an empty value is passed.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a ServiceInstance that shares the managed object's SOAP stub,
    # i.e. the same authenticated connection.
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises VMwareApiError or VMwareRuntimeError when the logout fails.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises VMwareApiError for an unexpected endpoint type or permission
    failure, and VMwareRuntimeError on runtime faults.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' => vCenter Server; 'HostAgent' => standalone ESXi host.
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Returns the endpoint's ``about`` info object.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no DVS with that name exists
    '''
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        # NOTE(review): this container view is never Destroy()-ed; container
        # views are server-side objects and accumulate on the endpoint.
        container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        for item in container.view:
            if item.name == dvs_name:
                return item
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Returns the token base64-encoded.

    Raises ImportError when gssapi is unavailable and
    CommandExecutionError when no token can be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            # First outgoing token is returned immediately, base64-encoded.
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # No outgoing token and context not established: the handshake
        # stalled without a server response.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    Returns an empty dict when the endpoint is not a HostAgent (ESXi host).

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # All grains below are read from the first host in the view.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # Convert bytes to MiB for the mem_total grain.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Collect per-vmkernel-NIC addressing grains.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the local reference to the container view.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (full service content) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises VMwareApiError or VMwareRuntimeError when retrieval fails.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.

    Raises VMwareApiError or VMwareRuntimeError on API failures.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    Returns None when no managed object matches either the property value or
    the stringified object id.
    '''
    # Fetch every object of the requested type along with the one property
    # we need to compare against.
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # The stringified moref looks like 'vim.X:<id>'; strip the quotes so
        # a caller may also match on the raw object id string.
        mo_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or mo_id == property_value:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.

    Returns a list of dicts, one per object, mapping each retrieved property
    name to its value plus an 'object' key holding the managed object ref.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    # Retry once on transient connection hiccups (dropped keep-alive / EPIPE).
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)

    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises VMwareApiError when no properties could be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # The object's name is fetched only for logging; fall back to a
    # placeholder when this object type has no 'name' property.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Return the ``name`` property of a managed object, or None when the
    property wasn't found.

    mo_ref
        The managed object reference.
    '''
    retrieved = get_properties_of_managed_object(mo_ref, ['name'])
    return retrieved.get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device of the requested type.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or 'e1000e'.

    Raises ValueError for any other adapter type name.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type not in adapter_classes:
        raise ValueError('An unknown network adapter object type name.')
    return adapter_classes[adapter_type]()
def get_network_adapter_object_type(adapter_object):
    '''
    Return the string type name of a virtual network adapter device.

    adapter_object
        The adapter device object whose type name is returned.

    Raises ValueError when the object matches none of the known adapter
    types.

    The check order is preserved from the original implementation
    (vmxnet2 before vmxnet, e1000e before e1000) so more specific types
    are matched first.
    '''
    type_table = [
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    ]
    for adapter_class, type_name in type_table:
        if isinstance(adapter_object, adapter_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.

    Note: when ``get_all_dvss`` is False and ``dvs_names`` is None or
    empty, an empty list is returned.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    properties = ['name']
    # DVSs live under the datacenter's network folder: traverse
    # Datacenter.networkFolder -> Folder.childEntity to reach them.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualSwitch,
                                      container_ref=dc_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_dvss or (dvs_names and i['name'] in dvs_names)]
    return items
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter

    dc_ref
        The parent datacenter reference.

    Raises VMwareObjectRetrievalError when the folder can't be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # One hop: Datacenter.networkFolder is the folder itself.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switches (DVS) in a datacenter.
    Returns the reference to the newly created distributed virtual switch.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        # Only set the name when we build the config spec ourselves; a
        # caller-supplied config spec is used as-is.
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the creation task completes (raises on task failure).
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfigure task completes (raises on task failure).
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enables or disables network I/O control (NIOC) on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    switch_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, switch_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter
        (vim.Datacenter) or a dvs (vim.DistributedVirtualSwitch).

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises an ArgumentValueError if the parent is of an unsupported type.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    if isinstance(parent_ref, vim.Datacenter):
        # Portgroups of a datacenter live under its network folder; traverse
        # through it to its children
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Keep only the requested portgroups (or everything when
    # get_all_portgroups is set)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference

    Raises a VMwareObjectRetrievalError if no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup carries the 'SYSTEM/DVS.UPLINKPG' tag
    matches = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            matches.append(entry['object'])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return matches[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    switch_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, switch_name)
    log.trace('spec = %s', spec)
    try:
        create_task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(create_task, switch_name,
                  six.text_type(create_task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Typo fixed: the message previously read 'Updating portgrouo %s'
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    portgroup_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', portgroup_name)
    try:
        destroy_task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, portgroup_name,
                  six.text_type(destroy_task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object (vim.Datacenter).

    network_names
        The names of the standard switch networks to return. Default is None.

    get_all_networks
        Boolean indicating whether to return all networks in the parent.
        Default is False.

    Raises an ArgumentValueError if the parent is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks live under the datacenter's network folder; traverse through
    # it to its children
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content (e.g. vim.Datacenter) for which to obtain
        information.

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Only the 'name' of each retrieved managed object is returned
    return [item['name'] for item in
            get_mors_with_properties(service_instance, vim_object,
                                     properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        return service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises a VMwareObjectRetrievalError if the manager is not available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        assignment_mgr = (service_instance.content
                          .licenseManager.licenseAssignmentManager)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not assignment_mgr:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return assignment_mgr
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        licenses = license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return licenses
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license and returns the resulting license object.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # Attach a UI label carrying the human readable description
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        added_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return added_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity_ref is not
    provided, the entity is assumed to be the vCenter itself; in that case
    entity_name is additionally validated against the assignment's display
    name.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required: an ArgumentValueError is
        raised when it is missing (even when entity_ref is given).
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises VMwareObjectRetrievalError when the vCenter query returns more
    than one assignment or the assignment belongs to a different vCenter.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        # entity_name is needed both for logging and for the vCenter
        # sanity check below
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # vCenter case: query by the instance UUID instead of a moid
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if entity_type == 'uuid' and len(assignments) > 1:
        # A vCenter uuid query is expected to yield exactly one assignment
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the resulting license object.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # No entity reference: assign to the vCenter itself, identified by
        # its instance UUID
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before translating the fault, consistent with the other
            # API wrappers in this module (previously missing here)
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns the vim.Datacenter objects in a vCenter, optionally filtered by
    name.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    matching = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            matching.append(entry['object'])
    return matching
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object by name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name

    Raises a VMwareObjectRetrievalError if the datacenter does not exist.
    '''
    results = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if results:
        return results[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder and returns the new
    vim.Datacenter object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        datacenter = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return datacenter
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference (vim.Datacenter)

    cluster
        The name of the cluster to be retrieved

    Raises a VMwareObjectRetrievalError if the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's host folder; traverse through it
    # to its children
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    datacenter_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, datacenter_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Reconfigures a cluster and waits for the reconfiguration task to finish.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx) to apply.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        reconf_task = cluster_ref.ReconfigureComputeResource_Task(
            cluster_spec, modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(reconf_task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster (storage pod) names associated with
    a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastore names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dictionary of the datastores associated with a given service
    instance, keyed by datastore name. Each value contains basic
    information about the datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {datastore: list_datastore_full(service_instance, datastore)
            for datastore in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises a VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    summary = datastore_object.summary
    items = {}
    # str() coerces pyVmomi string types; stray quotes are stripped to keep
    # the historical output format
    items['name'] = str(summary.name).replace("'", "")
    items['type'] = str(summary.type).replace("'", "")
    items['url'] = str(summary.url).replace("'", "")
    # The API reports sizes in bytes; convert to MiB
    items['capacity'] = summary.capacity / 1024 / 1024
    items['free'] = summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against a zero-capacity (e.g. inaccessible) datastore to avoid
    # a ZeroDivisionError
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key renders like "vim.HostSystem:host-123"; extract the moid
        # after the first colon
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns None if no matching object is found.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
    finally:
        # Container views are server-side session objects; destroy the view
        # so it doesn't leak on the vCenter/ESXi host
        container.Destroy()
    return None
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object

    Returns None if no matching object is found.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
    finally:
        # Container views are server-side session objects; destroy the view
        # so it doesn't leak on the vCenter/ESXi host
        container.Destroy()
    return None
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Names of the datastores to search

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Datastores that don't contain the directory simply contribute
            # no results
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.
        NOTE(review): when backing_disk_ids is also passed, this list is
        mutated (extended) in place with the disk-backed datastore names.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.

    Raises an ArgumentValueError for unsupported reference types or filter
    combinations.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # NOTE(review): this extends the caller-supplied list in place
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    current_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", current_name,
              new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object from which to obtain the storage system.

    host_ref
        The vim.HostSystem reference whose storage system is retrieved.

    hostname
        Host name used in logging; if None it is retrieved from host_ref.

    Raises a VMwareObjectRetrievalError if the storage system could not be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    # NOTE(review): 'systemFile' is requested only because the property
    # collector requires at least one property; only the object reference is
    # used below -- confirm before changing
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo
    '''
    try:
        infos = storage_system.RetrieveDiskPartitionInfo(
            devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A single device path was passed in, so only the first result matters
    log.trace('partition_info = %s', infos[0])
    return infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem used to compute the new layout.

    device_path
        Path of the device on which the partition is added.

    partition_info
        Current vim.HostDiskPartitionInfo of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use a printf-style placeholder: log.trace substitutes '%s' lazily, so
    # the previous '{0}' placeholder was never filled in
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        # NOTE(review): sibling code in this module raises
        # VMwareObjectNotFoundError/VMwareObjectRetrievalError; confirm that
        # salt.exceptions actually defines VMwareNotFoundError
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore on a scsi disk and returns the new
    vim.Datastore reference.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDisk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem; retrieved if not provided.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a new partition filling the remaining free space on the disk
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Retrieves the vim.HostDatastoreSystem of an ESXi host.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host object to its datastore system sub-object
    ds_system_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    matches = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=ds_system_spec)
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            "Host's '{0}' datastore system was not retrieved".format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return matches[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore via the datastore system of one of its attached
    hosts.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    # The removal must be issued through an attached host's datastore system
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None; retrieved if not given.

    hostname
        Name of the host. Default is None; retrieved if not given.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns the list of all vim.ScsiLun objects on an ESXi host (an empty
    list if the host reports none).

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None; retrieved if not given.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Builds a map of all vim.ScsiLun objects on an ESXi host keyed by their
    scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(
        host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # scsi address -> lun key
    key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        service_instance, host_ref, storage_system, hostname)
    # lun key -> lun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Compose the two maps: scsi address -> lun object
    scsi_addr_to_lun = {}
    for scsi_addr, lun_key in six.iteritems(key_by_scsi_addr):
        scsi_addr_to_lun[scsi_addr] = lun_by_key[lun_key]
    return scsi_addr_to_lun
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Nothing to filter by: no result possible
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk, as a vim.HostDiskPartitionInfo.

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are retrieved

    storage_system
        The ESXi host's storage system. Default is None; retrieved if not
        given.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the requested disk among the host's scsi luns
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the requested disk among the host's scsi luns
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # Nothing to filter by: no result possible
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by the canonical name of its (single) ssd
    # cache disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that the cache and capacity disks of a disk group match the
    expected canonical names; raises ArgumentValueError on mismatch and
    returns True otherwise.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the first cache configuration info entry if the host cache is
    configured on the specified host, otherwise returns None.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Traverse from the host to its cache configuration manager
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host and returns True on
    success.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore opject representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the configuration task finishes (raises on task error)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    hosts = list_objects(service_instance, vim.HostSystem)
    return hosts
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; return every resource pool instead of filtering by name

    return
        List of resource pool managed object references
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search under the datacenter if one was given, otherwise under the
    # inventory root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the requested names rather than the (empty) result list,
        # so the error message is actually useful for debugging
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    pools = list_objects(service_instance, vim.ResourcePool)
    return pools
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    networks = list_objects(service_instance, vim.Network)
    return networks
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vms = list_objects(service_instance, vim.VirtualMachine)
    return vms
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    folders = list_objects(service_instance, vim.Folder)
    return folders
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    switches = list_objects(service_instance, vim.DistributedVirtualSwitch)
    return switches
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vapps = list_objects(service_instance, vim.VirtualApp)
    return vapps
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    portgroups = list_objects(service_instance,
                              vim.dvs.DistributedVirtualPortgroup)
    return portgroups
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed; returns the task result on success and
    translates task faults into salt VMware exceptions on failure.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        Controls how often the "waiting" message is logged (every
        ``sleep_seconds`` iterations); the poll itself happens roughly once
        per second regardless. Defaults to ``1``.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole second boundary relative to start_time
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the task's fault so it can be
        # mapped to the appropriate salt exception type below
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # The comma between the two fragments makes them separate list
        # elements so the join inserts a space; previously they were
        # adjacent literals and concatenated to "...with thesame name..."
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Use the parent folder of the base (clone-source) virtual machine
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    # NOTE(review): if base_vm_name, placement['folder'] and datacenter are
    # all absent, folder_object is never assigned and this return raises
    # UnboundLocalError -- confirm callers always provide one of them
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    Resolves the placement of a virtual machine to the strictest possible
    resource pool and placement object (host or cluster).

    service_instance
        The Service Instance from which to obtain managed object references.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info: cluster, host or resource pool
        name.
        NOTE(review): despite the ``None`` default, ``placement`` is
        dereferenced unconditionally below, so callers must pass a dict.

    return
        Tuple of (resource pool object, cluster/host object, if any applies)

    :raises salt.exceptions.VMwareObjectRetrievalError:
        If the referenced host/cluster/resource pool (or a required
        property of it) cannot be found, or no placement key is present.
    :raises salt.exceptions.VMwareMultipleObjectsError:
        If more than one resource pool matches the given name.
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter,
                                 host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The host does not expose 'resourcePool' directly; walk up to
            # the parent compute resource and fetch its pool instead.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fix: the original message referenced placement['host'], which
            # does not exist in this branch (masking the real error with a
            # KeyError) and named the wrong entity.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a dict with the
    size as a (long) integer number of kibibytes.

    unit
        Unit of the size, one of ``GB``, ``MB`` or ``KB`` (case-insensitive);
        Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    :raises salt.exceptions.ArgumentValueError: if the unit is not recognized.
    '''
    # vCenter needs a plain integer value, hence the int() conversion.
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    try:
        factor = multipliers[unit.lower()]
    except KeyError:
        # Fix: the previous message ('The unit is not specified') was
        # misleading -- the unit was given, just not a recognized one.
        raise salt.exceptions.ArgumentValueError(
            'Invalid unit \'{0}\'; expected GB, MB or KB'.format(unit))
    return {'size': int(size * factor), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers a virtual machine on or off and waits for the task to complete.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine

    :raises salt.exceptions.ArgumentValueError: for an unsupported action.
    :raises salt.exceptions.VMwareApiError / VMwareRuntimeError:
        on vSphere API faults.
    '''
    # Resolve the requested action up front; unsupported values fail fast.
    if action == 'on':
        power_op, task_name = virtual_machine.PowerOn, 'power on'
    elif action == 'off':
        power_op, task_name = virtual_machine.PowerOff, 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_op()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine),
                      task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from a config spec and waits for the task.

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will ne placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Only pass a host when a real vim.HostSystem was supplied.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host
        Placement host of the virtual machine, vim.HostSystem object
    '''
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    # A target host is optional; vCenter picks one when it is omitted.
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Applies a config spec to an existing virtual machine and waits for the
    reconfiguration task to finish.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine, removing it from the inventory and
    deleting its files from the datastore.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory. Unlike ``delete_vm``
    this leaves the virtual machine's files on the datastore untouched.

    vm_ref
        Managed object reference of a virtual machine object

    :raises salt.exceptions.VMwareApiError / VMwareRuntimeError:
        on vSphere API faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fix: docstring/log said 'Destroying' (copy-paste from delete_vm);
    # UnregisterVM only removes the VM from the inventory.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Consistency fix: log the fault before re-raising, like the sibling
        # VM operations (delete_vm, update_vm, ...) do.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
_get_scsi_address_to_lun_key_map
|
python
|
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None (retrieved on demand).

    hostname
        Name of the host. Default is None (retrieved on demand).

    :raises salt.exceptions.VMwareObjectRetrievalError:
        if the storage device info, the multipath info or the luns cannot
        be retrieved from the host.
    :raises salt.exceptions.VMwareApiError / VMwareRuntimeError:
        on vSphere API faults.
    '''
    # Resolve the optional arguments lazily so callers can pass cached
    # values and avoid extra round trips.
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    # A lun can be reached via several paths; record its key under the scsi
    # address of every path.
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
|
Returns a map between the scsi addresses and the keys of all luns on an ESXi
host.
map[<scsi_address>] = <lun key>
service_instance
The Service Instance Object from which to obtain the hosts
host_ref
The vim.HostSystem object representing the host that contains the
requested disks.
storage_system
The host's storage system. Default is None.
hostname
Name of the host. Default is None.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2550-L2610
|
[
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n",
"def get_storage_system(service_instance, host_ref, hostname=None):\n '''\n Returns a host's storage system\n '''\n\n if not hostname:\n hostname = get_managed_object_name(host_ref)\n\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n path='configManager.storageSystem',\n type=vim.HostSystem,\n skip=False)\n objs = get_mors_with_properties(service_instance,\n vim.HostStorageSystem,\n property_list=['systemFile'],\n container_ref=host_ref,\n traversal_spec=traversal_spec)\n if not objs:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Host\\'s \\'{0}\\' storage system was not retrieved'\n ''.format(hostname))\n log.trace('[%s] Retrieved storage system', hostname)\n return objs[0]['object']\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Fall back to the standard HTTPS endpoint when not specified.
    port = 443 if port is None else port
    protocol = 'https' if protocol is None else protocol
    # NOTE(review): user-supplied values are interpolated into a shell
    # command line; values containing single quotes would break (or abuse)
    # the command -- confirm inputs are trusted.
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    if esxi_host:
        # 'host' is a vCenter; target the ESXi machine via -h.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting directly to an ESXi server; 'host' is the target.
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    return salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Retries the connection with relaxed SSL verification when certificate
    validation fails (first with an unverified default context, then with a
    bare TLSv1 context).

    :raises salt.exceptions.CommandExecutionError:
        if mandatory parameters for the chosen mechanism are missing or the
        mechanism is unsupported.
    :raises salt.exceptions.VMwareConnectionError:
        if the connection ultimately cannot be established.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Fix: TypeError has no '.message' attribute on Python 3, which
        # previously turned this diagnostic path into an AttributeError.
        exc_message = getattr(exc, 'message', six.text_type(exc))
        if 'unexpected keyword argument' in exc_message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc_message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
        # Fix: always re-raise; previously a TypeError without the marker
        # text was silently swallowed, surfacing later as a confusing
        # NameError on 'service_instance'.
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                    '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
                    '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # First retry: same connection with certificate
                # verification disabled.
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: a bare TLSv1 context with verification off.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is cleaned up when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Look the spec up by name on the vCenter customization spec manager.
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    :return: the first matching managed object, or None if no match.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Fix: destroy the container view so the server-side session does
        # not accumulate stale views (previously leaked on every call).
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # Reuse pyVim's process-wide cached service instance when possible.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Session expired server-side; drop it and authenticate again.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a SOAP stub that points to a different endpoint path, created
    from an existing connection (the session cookie is carried over).

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    ssl_context = None
    if sys.version_info[:3] > (2, 7, 8):
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE
    existing_stub = service_instance._stub
    hostname = existing_stub.host.split(':')[0]
    # Propagate the current vCenter session so the new stub is authenticated.
    session_cookie = existing_stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=ssl_context)
    new_stub.cookie = existing_stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # A ServiceInstance sharing the managed object's SOAP stub talks to the
    # same endpoint/session.
    service_instance = vim.ServiceInstance('ServiceInstance')
    service_instance._stub = mo_ref._stub
    return service_instance
def disconnect(service_instance):
    '''
    Closes the session to the vCenter server or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Returns True if the connection is made to a vCenter Server and False if
    the connection is made to an ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.

    :raises salt.exceptions.VMwareApiError: for any other endpoint type.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' identifies vCenter; 'HostAgent' a standalone ESXi host.
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns the 'about' information of the vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None if not found
    '''
    # Only walk the inventory when the name is known to exist.
    if dvs_name in list_dvs(service_instance):
        inventory = get_inventory(service_instance)
        view = inventory.viewManager.CreateContainerView(
            inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        for candidate in view.view:
            if candidate.name == dvs_name:
                return candidate
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    :raises ImportError: if the optional ``gssapi`` library is not available.
    :raises salt.exceptions.CommandExecutionError:
        if no token could be obtained from the server.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    # Build the full Kerberos service principal, e.g. 'svc/host@DOMAIN'.
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # Advance the GSSAPI handshake; the step is driven with an empty
        # input token on the first (and, in practice, only) iteration.
        out_token = ctx.step(in_token)
        if out_token:
            # The raw token is base64-encoded for use in the SOAP header.
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): 'in_token' is never reassigned, so whenever no
        # output token is produced and the context is not yet established,
        # this branch raises -- confirm a multi-step handshake was intended.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only an ESXi host connection (apiType 'HostAgent') describes a single
    # machine; for a vCenter connection an empty dict is returned.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        # Container view over all HostSystem objects; on a HostAgent there
        # is exactly one host, hence the view.view[0] accesses below.
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            # Serial number is published as the 'ServiceTag' identifying info.
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grains report MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # VMkernel NICs provide the host's IP/MAC interface grains.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host[.domain]; the dot is omitted when domain is empty.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NIC MACs are added alongside the vmkernel ones.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the local reference to the container view when done.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the full service content (inventory) of a Service Instance.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises VMwareApiError / VMwareRuntimeError when the underlying
    pyVmomi call fails.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    # Translate pyVmomi faults into salt exceptions, most specific first.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        # With local properties the starting object itself is collected and
        # no traversal is performed; otherwise it is skipped and traversed.
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view
    # (only when this function created the container view above)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Fetch every managed object of the requested type along with the
    # property used for matching.
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # A match on either the property value or the quoted-stripped
        # string form of the managed object reference is accepted.
        moid = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == moid:
            return candidate['object']
    # No managed object matched.
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specigying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # A dropped HTTP connection is retried once.
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        # Only a broken pipe is retried; any other IOError is re-raised.
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)

    # Flatten each result's propSet into a plain dict; the managed object
    # reference itself is stored under the 'object' key.
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimally.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises VMwareApiError when no properties could be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # The object's name is fetched first, purely for log/error messages;
    # a missing 'name' property is tolerated.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    # local_properties=True restricts collection to the object itself.
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.
    If the name wasn't found, it returns None.

    mo_ref
        The managed object reference.
    '''
    # Fetch only the 'name' property; .get tolerates a missing name.
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device object for the given
    adapter type name.

    adapter_type
        The adapter type name. One of ``vmxnet``, ``vmxnet2``,
        ``vmxnet3``, ``e1000`` or ``e1000e``.

    Raises ``ValueError`` when the adapter type name is unknown.
    '''
    # Single lookup table instead of an if/elif chain; the device object
    # is instantiated on demand so no device is created for a bad name.
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type not in adapter_classes:
        raise ValueError('An unknown network adapter object type name.')
    return adapter_classes[adapter_type]()
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name for a virtual adapter device.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ``ValueError`` for unrecognized device types.
    '''
    # The check order mirrors the original logic: the more specific
    # vmxnet2/vmxnet3 and e1000e classes are tested before vmxnet/e1000.
    type_checks = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for device_class, type_name in type_checks:
        if isinstance(adapter_object, device_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVS
    # objects; only the 'name' property is needed for filtering below.
    folder_children_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_children_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    entries = get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualSwitch,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
    dvss = []
    for entry in entries:
        # Keep everything when get_all_dvss is set, otherwise only the
        # requested names.
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter

    dc_ref
        The datacenter reference whose network folder is returned.

    Raises VMwareObjectRetrievalError when the folder cannot be found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Follow the datacenter's 'networkFolder' property directly.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and blocks
    until the creation task completes.

    NOTE(review): despite earlier documentation, nothing is returned here;
    the created DVS must be looked up separately (e.g. via ``get_dvss``).

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the
        name is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Build a minimal spec when none was supplied; the name in the spec
    # is always forced to dvs_name.
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the async creation task finishes (raises on task error).
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and blocks
    until the reconfiguration task completes.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the async reconfigure task finishes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (network I/O control) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        # Synchronous call; no task is returned for this operation.
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    # The traversal depends on the parent type: from a datacenter we must
    # descend through networkFolder -> childEntity; from a DVS the
    # 'portgroup' property leads directly to the portgroups.
    if isinstance(parent_ref, vim.Datacenter):
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else: # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs)

    dvs_ref
        The dvs reference

    Raises VMwareObjectRetrievalError when no uplink portgroup exists.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by the system tag
    # 'SYSTEM/DVS.UPLINKPG' on the portgroup's 'tag' property.
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    # Only the first matching portgroup is returned.
    return items[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and blocks until the creation task completes.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the async creation task finishes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and blocks until the
    reconfiguration task completes.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log-message typo ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the async reconfigure task finishes.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and blocks until the destroy
    task completes.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the async destroy task finishes.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.

    Raises ArgumentValueError when parent_ref is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to reach the
    # network objects.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    # Fetch the requested properties for every object and keep the names.
    if properties is None:
        properties = ['name']
    entries = get_mors_with_properties(service_instance, vim_object, properties)
    return [entry['name'] for entry in entries]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obrain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obrain the license manager.

    Raises VMwareObjectRetrievalError when the manager is not available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
                service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # The property can legitimately be unset (e.g. on a direct host
    # connection), which is reported as a retrieval error.
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obrain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Returns the added license (vim.LicenseManagerLicenseInfo).
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label the vSphere client displays.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        # The vCenter is addressed by its instance UUID rather than a moid.
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
                license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # A vCenter (uuid) lookup must yield exactly one assignment.
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        # Sanity-check that the assignment belongs to the expected vCenter.
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')

    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obrain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.

    Returns the assigned license (vim.LicenseManagerLicenseInfo).
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Consistency fix: log the fault before translating it, as
            # every other handler in this module does.
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Lists the names of all datacenters known to a service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    datacenters = list_objects(service_instance, vim.Datacenter)
    return datacenters
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns datacenter managed objects from a vCenter.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    found = []
    entries = get_mors_with_properties(service_instance,
                                       vim.Datacenter,
                                       property_list=['name'])
    for entry in entries:
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            found.append(entry['object'])
    return found
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns the vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name

    Raises a VMwareObjectRetrievalError when no datacenter with that name
    exists.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name

    Returns the newly created vim.Datacenter object.

    Raises salt.exceptions.VMwareApiError on vim faults and
    salt.exceptions.VMwareRuntimeError on vmodl runtime faults.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        # Surface the missing privilege to the caller
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Retrieves a cluster from a datacenter by name.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved

    Raises a VMwareObjectRetrievalError when the cluster is not found in the
    datacenter.
    '''
    datacenter_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, datacenter_name)
    service_instance = get_service_instance_from_managed_object(
        dc_ref, name=datacenter_name)
    # Traverse datacenter -> hostFolder -> child entities to reach clusters
    folder_traversal = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_traversal])
    matches = []
    entries = get_mors_with_properties(service_instance,
                                       vim.ClusterComputeResource,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    for entry in entries:
        if entry['name'] == cluster:
            matches.append(entry['object'])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, datacenter_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.

    Raises salt.exceptions.VMwareApiError on vim faults and
    salt.exceptions.VMwareRuntimeError on vmodl runtime faults.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        # The created cluster reference is discarded here; callers retrieve
        # it with get_cluster when needed
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter and blocks until the reconfiguration
    task completes.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.

    Raises salt.exceptions.VMwareApiError on vim faults and
    salt.exceptions.VMwareRuntimeError on vmodl runtime faults.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True applies the spec incrementally instead of replacing
        # the whole cluster configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the server-side reconfiguration finishes
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Lists the clusters visible to a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    clusters = list_objects(service_instance, vim.ClusterComputeResource)
    return clusters
def list_datastore_clusters(service_instance):
    '''
    Lists the datastore clusters (storage pods) visible to a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    pods = list_objects(service_instance, vim.StoragePod)
    return pods
def list_datastores(service_instance):
    '''
    Lists the datastores visible to a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastores = list_objects(service_instance, vim.Datastore)
    return datastores
def list_datastores_full(service_instance):
    '''
    Returns a dict keyed by datastore name with basic information about each
    datastore visible to the service instance:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    result = {}
    for ds_name in list_objects(service_instance, vim.Datastore):
        result[ds_name] = list_datastore_full(service_instance, ds_name)
    return result
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises a VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # The API reports sizes in bytes; convert to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against zero capacity (e.g. an inaccessible datastore) which
    # previously raised ZeroDivisionError
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key renders like "vim.HostSystem:host-123"; keep the moid part
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the managed object reference, or None if no object with the
    given name exists.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # does not accumulate on the vCenter/ESXi side
        container.Destroy()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object

    Returns the managed object reference, or None if no object with the
    given moid exists.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # does not accumulate on the vCenter/ESXi side
        container.Destroy()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects

    Raises salt.exceptions.VMwareApiError on vim faults and
    salt.exceptions.VMwareRuntimeError on vmodl runtime faults.
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Search each datastore under '[<datastore>] <directory>'
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore without the directory is simply skipped
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible. Supported
        types: vim.HostSystem, vim.ClusterComputeResource, vim.Datacenter,
        vim.StoragePod or the root 'Datacenters' vim.Folder.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.

    Raises salt.exceptions.ArgumentValueError for unsupported reference
    types.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            # Backing-disk filtering requires host-level storage info
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Merge the disk-derived names into the explicit name filter
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises salt.exceptions.VMwareApiError on vim faults and
    salt.exceptions.VMwareRuntimeError on vmodl runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns the vim.HostStorageSystem of an ESXi host.

    service_instance
        The Service Instance Object used for retrieval.

    host_ref
        The vim.HostSystem whose storage system is requested.

    hostname
        Name of the host; looked up from host_ref when not given.

    Raises a VMwareObjectRetrievalError if the storage system could not be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Follow configManager.storageSystem from the host object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return entries[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition informations for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The vim.HostStorageSystem used to query the device.

    device_path
        Path of the disk device to inspect.

    Raises salt.exceptions.VMwareApiError on vim faults and
    salt.exceptions.VMwareRuntimeError on vmodl runtime faults.
    '''
    try:
        # The API takes a list of paths; only one device is queried here
        partition_infos = \
                storage_system.RetrieveDiskPartitionInfo(
                    devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem of the target host.

    device_path
        Path of the disk device on which the partition is added.

    partition_info
        The current vim.HostDiskPartitionInfo of the device.

    Raises VMwareObjectNotFoundError when no free partition exists and
    VMwareNotFoundError when the new partition can't be identified in the
    computed layout.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use lazy %-style args: the previous '{0}' placeholder was str.format
    # syntax, which the logging module never substitutes
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved if not provided.
        Default is None.

    Returns the created vim.Datastore reference.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    # Compute a partition spec that uses the remaining free space on the disk
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
                host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises a VMwareObjectRetrievalError if the datastore system could not be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Follow configManager.datastoreSystem from the host object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return entries[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes (unmounts) a datastore.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises VMwareApiError if the datastore has no attached hosts or on vim
    faults, and VMwareRuntimeError on vmodl runtime faults.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # The removal is performed through the datastore system of the first
    # host the datastore is mounted on
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    if cluster_name and not datacenter_name:
        # Cluster membership only makes sense within a datacenter
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    props = ['name']
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # The parent property is needed for the membership check below
            props.append('parent')
    else:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    retrieved = get_mors_with_properties(service_instance,
                                         vim.HostSystem,
                                         container_ref=start_point,
                                         property_list=props)
    log.trace('Retrieved hosts: %s', [entry['name'] for entry in retrieved])
    selected = []
    for entry in retrieved:
        if cluster_name:
            # Only keep hosts that are members of the requested cluster
            parent = entry['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or entry['name'] in host_names:
            selected.append(entry['object'])
    return selected
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    Returns the host's scsi luns, or an empty list when none are present.

    Raises VMwareObjectRetrievalError when the storage system or its device
    info can't be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Builds a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # First map scsi addresses to lun keys, then resolve each key to its
    # lun object
    scsi_addr_to_lun_key = _get_scsi_address_to_lun_key_map(
        si, host_ref, storage_system, hostname)
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    result = {}
    for scsi_addr, lun_key in six.iteritems(scsi_addr_to_lun_key):
        result[scsi_addr] = lun_by_key[lun_key]
    return result
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # Nothing to filter by, so nothing to return
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.

    Returns the vim.HostDiskPartitionInfo of the disk.

    Raises VMwareObjectRetrievalError when no devices are found or when the
    disk is not present on the host.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the requested disk among the host's scsi luns
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk

    in a vcenter filtered by their names and/or datacenter, cluster membership

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError when the host's devices or the disk
    can't be retrieved, VMwareApiError on vim faults and VMwareRuntimeError
    on vmodl runtime faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the requested disk among the host's scsi luns
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing the disk
    groups in an ESXi host, filtered by the canonical names of their cache
    disks.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disk groups in the host.
        Default value is False.

    raises
        VMwareApiError/VMwareRuntimeError on vCenter faults,
        VMwareObjectRetrievalError when the host has no vSAN config/storage info
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # Nothing to filter by means nothing to return.
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by the canonical name of its (single) cache
    # (ssd) disk.
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Validates that a disk group contains exactly the expected cache disk and
    capacity disks; raises ArgumentValueError when the check fails, returns
    True on success.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # Compare capacity disks order-insensitively by sorting both sides.
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids,
                      expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, otherwise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # No manager supplied: traverse from the host to its cache
        # configuration manager via the property collector.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first cache configuration entry is returned; caches on
        # multiple datastores are not supported.
        return results[0]['cacheConfigurationInfo'][0]
    else:
        # Manager supplied: read the property directly off the managed object.
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        # Retrieve the cache configuration manager from the host itself.
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # The configuration is asynchronous; block until the task completes.
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    # Thin wrapper around the generic lister for vim.HostSystem objects.
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        List of resource pool names to retrieve

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; when True all resource pools in the container are returned
        and ``resource_pool_names`` is ignored as a filter

    return
        List of vim.ResourcePool managed object references

    raises
        VMwareObjectRetrievalError when no matching resource pool is found
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search under the given datacenter, or the whole inventory otherwise.
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = [pool['object'] for pool in resource_pools
                      if get_all_resource_pools or
                      (pool['name'] in resource_pool_names)]
    if not selected_pools:
        # Report the requested names; the previous code formatted the (empty)
        # result list here, which made the error message useless.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    # Thin wrapper around the generic lister for vim.ResourcePool objects.
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    # Thin wrapper around the generic lister for vim.Network objects.
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    # Thin wrapper around the generic lister for vim.VirtualMachine objects.
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    # Thin wrapper around the generic lister for vim.Folder objects.
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    # Thin wrapper around the generic lister for DVS objects.
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    # Thin wrapper around the generic lister for vim.VirtualApp objects.
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    # Thin wrapper around the generic lister for distributed portgroup objects.
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    Returns the task result on success; on failure the server-side fault is
    re-raised and translated into the corresponding salt exception.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

        NOTE(review): the poll below always sleeps up to the next one-second
        boundary; this value only controls how often the 'Waiting ...'
        message is logged. Confirm before relying on it as a poll interval.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only log a progress message every sleep_seconds iterations.
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep to the next whole-second boundary relative to start_time so
        # the counter roughly tracks elapsed seconds.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the fault stored on the task so
        # it can be translated into the matching salt exception.
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first detailed fault message, when present.
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    raises
        VMwareObjectRetrievalError when no machine matches the name,
        VMwareMultipleObjectsError when more than one machine matches.
    '''
    if datacenter and not parent_ref:
        # Constrain the search to the given datacenter.
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set commonly needed by callers.
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    # Filter client-side by exact name match.
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # A comma was missing between the two string literals below, so the
        # message previously read 'with thesame name'.
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns the vim.Folder object where a virtual machine should be placed.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary; may contain a 'folder' key naming the folder

    base_vm_name
        Existing virtual machine name (for cloning); when given, the parent
        folder of that machine is returned

    raises
        VMwareObjectRetrievalError when no folder can be determined,
        VMwareMultipleObjectsError when the folder name is ambiguous
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Clone case: reuse the parent folder of the source virtual machine.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this path fell through to an UnboundLocalError on
        # 'folder_object'; raise a meaningful error instead.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The placement folder object could not be determined')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object used to query the vCenter

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies

    raises
        VMwareObjectRetrievalError when the referenced objects cannot be found
        or when no placement is defined,
        VMwareMultipleObjectsError when a name is ambiguous
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if placement is None:
        # Guard against "'host' in None" raising TypeError; an empty dict
        # falls through to the explicit 'Placement is not defined.' error.
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            # A standalone host exposes its resource pool directly.
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The host is part of a cluster: traverse up to the cluster and
            # use its resource pool instead.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # The original message formatted placement['host'], which is not
            # set in this branch and raised KeyError instead of the intended
            # error.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit; returns a dict with the
    integer size in KB and the unit, eg. ``{'size': 1024, 'unit': 'KB'}``.
    (The original docstring claimed a bare integer was returned.)

    unit
        Unit of the size: 'GB', 'MB' or 'KB', case-insensitive;
        Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    raises
        ArgumentValueError when the unit is not recognized
    '''
    # vCenter needs an integer ("long") value, expressed in KiB.
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    try:
        target_size = int(size * multipliers[unit.lower()])
    except KeyError:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': target_size, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by its name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine; must be 'on' or 'off'

    raises
        ArgumentValueError for an unsupported action,
        VMwarePowerOnError when the power task references a missing file
    '''
    if action == 'on':
        try:
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    elif action == 'off':
        try:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        # Block until the power task completes.
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # A missing VM file surfaces as FileNotFound; translate it into a
        # power-operation specific error.
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    try:
        # Pin the VM to a specific host only when a valid one was supplied.
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Creation can be slow: log progress at info level every 10th poll.
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)
    '''
    try:
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # Missing vmx file on the datastore: translate into a
        # registration-specific error.
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update

    return
        Result of the reconfiguration task
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfiguration completes.
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine (removes it from inventory and deletes its
    files from the datastore)

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the destroy task completes.
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory; the virtual machine
    files are left untouched on the datastore. (The previous docstring and
    log message incorrectly said the machine was destroyed.)

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        # UnregisterVM is synchronous - no task to wait for.
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log the fault before translating, consistent with the sibling
        # helpers in this module.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_all_luns
|
python
|
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects (SCSI LUNs) attached to a
    host; returns an empty list if the host reports none.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        # No storage system supplied: look it up through a service instance
        # bound to the same session as the host reference.
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
|
Returns a list of all vim.HostScsiDisk objects in a disk
host_ref
The vim.HostSystem object representing the host that contains the
requested disks.
storage_system
The host's storage system. Default is None.
hostname
Name of the host. This argument is optional.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2613-L2660
|
[
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n",
"def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):\n '''\n Retrieves the service instance from a managed object.\n\n me_ref\n Reference to a managed object (of type vim.ManagedEntity).\n\n name\n Name of managed object. This field is optional.\n '''\n if not name:\n name = mo_ref.name\n log.trace('[%s] Retrieving service instance from managed object', name)\n si = vim.ServiceInstance('ServiceInstance')\n si._stub = mo_ref._stub\n return si\n",
"def get_storage_system(service_instance, host_ref, hostname=None):\n '''\n Returns a host's storage system\n '''\n\n if not hostname:\n hostname = get_managed_object_name(host_ref)\n\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n path='configManager.storageSystem',\n type=vim.HostSystem,\n skip=False)\n objs = get_mors_with_properties(service_instance,\n vim.HostStorageSystem,\n property_list=['systemFile'],\n container_ref=host_ref,\n traversal_spec=traversal_spec)\n if not objs:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Host\\'s \\'{0}\\' storage system was not retrieved'\n ''.format(hostname))\n log.trace('[%s] Retrieved storage system', hostname)\n return objs[0]['object']\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        # Salt convention: (False, reason) keeps the module from loading.
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary (output of ``cmdmod.run_all``), or False when the
             ``esxcli`` binary is not found on PATH
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    # NOTE(review): host/user/pwd/cmd are interpolated directly into a shell
    # string; values containing single quotes or shell metacharacters can
    # break or inject into the command — verify callers sanitize these.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # vCenter connection: -h selects the ESXi host to operate on.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    # 'quiet' presumably keeps the command line (which embeds the password)
    # out of the minion logs — confirm against cmdmod behavior.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        Location of the vCenter server or ESX/ESXi host.

    username / password
        Credentials; both are mandatory when ``mechanism`` is ``userpass``.

    protocol / port
        Connection scheme and TCP port handed to ``SmartConnect``.

    mechanism
        Either ``userpass`` or ``sspi``.

    principal / domain
        Kerberos service principal and user domain; both are mandatory when
        ``mechanism`` is ``sspi``.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:  # pylint: disable=broad-except
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # BUGFIX: Python 3 exceptions have no ``.message`` attribute —
        # reading it here used to raise AttributeError and mask the original
        # error. Fall back to the stringified exception instead.
        message = getattr(exc, 'message', six.text_type(exc))
        if 'unexpected keyword argument' in message:
            log.error('Initial connect to the VMware endpoint failed with %s', message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
        # Re-raise unconditionally: there is no service instance to return.
        # (Previously a non-matching TypeError was silently swallowed,
        # leaving ``service_instance`` undefined.)
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # Certificate verification failed: retry once with an
                # unverified SSL context.
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: TLSv1 with verification disabled.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:  # pylint: disable=broad-except
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is torn down at interpreter exit.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Fetch the VMware customization spec with the given name, for use when
    customizing a cloned VM.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the matching managed object reference, or None if not found.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # BUGFIX: destroy the container view so it does not accumulate on
        # the server side (matches the cleanup done in ``get_content``).
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # pyVim caches the last service instance process-wide; try to reuse it
    # before opening a new connection.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and authenticate from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    # Extract the session cookie value from the existing stub's cookie header
    # and publish it in the request context so the new stub reuses the
    # already-authenticated session instead of logging in again.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only for the trace log message.
        This field is optional.
    '''
    if not name:
        # NOTE(review): this branch is effectively dead — the default
        # '<unnamed>' is truthy, so it only runs when a caller explicitly
        # passes '' or None; otherwise the log below shows '<unnamed>'.
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # A ServiceInstance sharing the managed object's SOAP stub talks to the
    # same endpoint and session as the object itself.
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises ``salt.exceptions.VMwareApiError`` or ``VMwareRuntimeError`` when
    the underlying pyVmomi call fails.
    '''
    log.trace('Disconnecting')
    # Translate pyVmomi faults into salt exceptions so callers don't need to
    # depend on pyVmomi types.
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Return ``True`` when ``service_instance`` is connected to a vCenter
    Server and ``False`` when it is connected directly to an ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' -> vCenter Server, 'HostAgent' -> standalone ESXi.
    known_api_types = {'VirtualCenter': True, 'HostAgent': False}
    if api_type in known_api_types:
        return known_api_types[api_type]
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Returns the ``AboutInfo`` object of the endpoint; raises
    ``salt.exceptions.VMwareApiError`` / ``VMwareRuntimeError`` on failure.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no DVS with that name exists
    '''
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        try:
            for item in container.view:
                if item.name == dvs_name:
                    return item
        finally:
            # BUGFIX: destroy the container view so it does not leak on the
            # server (matches the cleanup done in ``get_content``).
            container.Destroy()
    return None
def _get_pnics(host_reference):
    '''
    Helper returning the list of PhysicalNics configured on a host.
    '''
    network_config = host_reference.config.network
    return network_config.pnic
def _get_vnics(host_reference):
    '''
    Helper returning the list of VirtualNics configured on a host.
    '''
    network_config = host_reference.config.network
    return network_config.vnic
def _get_vnic_manager(host_reference):
    '''
    Helper returning the host's VirtualNicManager.
    '''
    config_manager = host_reference.configManager
    return config_manager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None when no portgroup matches
    '''
    matches = (pg for pg in dvs.portgroup if pg.name == portgroup_name)
    return next(matches, None)
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None when no portgroup matches

    NOTE(review): currently identical to ``_get_dvs_portgroup`` — it matches
    by name only and does not filter for uplink portgroups specifically.
    '''
    for candidate in dvs.portgroup:
        if candidate.name == portgroup_name:
            return candidate
    return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Returns the base64-encoded token on success; raises ``ImportError`` when
    gssapi is unavailable, ``CommandExecutionError`` otherwise.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            # PY3: step() may return text; coerce to bytes before encoding.
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): ``in_token`` is never reassigned in this loop, so a
        # falsy ``out_token`` always reaches this raise on the first pass.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only standalone ESXi connections ('HostAgent') are harvested; for a
    # vCenter connection an empty dict is returned.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the single HostSystem of the ESXi host.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grains report MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Virtual NICs: collect per-device IPv4/IPv6 addresses and MACs.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NICs contribute their MACs to hwaddr_interfaces too.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # NOTE(review): the container view is only dereferenced here, not
        # Destroy()ed — presumably relying on session cleanup; verify.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (retrieved content) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises ``salt.exceptions.VMwareApiError`` / ``VMwareRuntimeError`` when
    the underlying pyVmomi call fails.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    # (all=True pulls every property when no explicit list was given)
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view (only when this function created it above)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match (or the stringified object id).

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Pull all candidates with just the one property we need to compare.
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # The stringified MOR (stripped of quotes) also counts as a match,
        # allowing lookup by object id.
        mor_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == mor_id:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    fetch_args = (service_instance, object_type)
    fetch_kwargs = {'property_list': property_list,
                    'container_ref': container_ref,
                    'traversal_spec': traversal_spec,
                    'local_properties': local_properties}
    try:
        content = get_content(*fetch_args, **fetch_kwargs)
    except BadStatusLine:
        # Transient HTTP hiccup: retry the fetch once.
        content = get_content(*fetch_args, **fetch_kwargs)
    except IOError as exc:
        # Only a broken pipe is retried; anything else propagates.
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*fetch_args, **fetch_kwargs)
    # Flatten each result entry into a {prop_name: value, 'object': mor} dict.
    object_list = []
    for entry in content:
        entry_props = dict((prop.name, prop.val) for prop in entry.propSet)
        entry_props['object'] = entry.obj
        object_list.append(entry_props)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns the requested properties of a managed object, retrieved in a
    single optimized call.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # Fetch the object's name first so failures below can identify it.
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    prop_entries = get_mors_with_properties(service_instance,
                                            type(mo_ref),
                                            container_ref=mo_ref,
                                            property_list=properties,
                                            local_properties=True)
    if not prop_entries:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return prop_entries[0]
def get_managed_object_name(mo_ref):
    '''
    Return the ``name`` property of a managed object, or None when the
    property wasn't found.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device of the requested type.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or 'e1000e'; anything
        else raises ValueError.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        # Instantiate a fresh device object on every call.
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Return the string type name for a virtual network adapter device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.
    '''
    # The check order mirrors the original isinstance chain and is
    # significant — presumably Vmxnet2/Vmxnet3 subclass VirtualVmxnet in
    # pyVmomi, so they must be matched before the base class.
    type_table = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for device_class, type_name in type_table:
        if isinstance(adapter_object, device_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Walk datacenter -> networkFolder -> childEntity to reach the DVSs.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    selected = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        # Keep everything when get_all_dvss, otherwise filter by name.
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            selected.append(entry['object'])
    return selected
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises VMwareObjectRetrievalError when the folder cannot be found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Single hop: datacenter -> networkFolder.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folder_entries = get_mors_with_properties(service_instance,
                                              vim.Folder,
                                              container_ref=dc_ref,
                                              property_list=['name'],
                                              traversal_spec=traversal_spec)
    if folder_entries:
        return folder_entries[0]['object']
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Network folder in datacenter \'{0}\' wasn\'t retrieved'
        ''.format(dc_name))
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete.

    NOTE(review): despite earlier docs, no reference to the new DVS is
    returned — callers retrieve it separately (e.g. via ``get_dvss``).

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
    # The name in the config spec is always forced to ``dvs_name``.
    dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the server-side creation task finishes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and waits for
    the reconfiguration task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the server-side reconfiguration task finishes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (network I/O control) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Returns a list of vim.DistributedVirtualPortgroup objects.
    Raises ArgumentValueError if parent_ref has an unsupported type.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    if isinstance(parent_ref, vim.Datacenter):
        # Portgroups live under the datacenter's networkFolder; traverse
        # through it down to the folder children
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference

    Raises VMwareObjectRetrievalError if no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by the system-defined tag
    # 'SYSTEM/DVS.UPLINKPG' attached to it
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    # Only one uplink portgroup is expected per DVS
    return items[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs).

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults. Blocks until the creation task completes.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults. Blocks until the reconfigure task completes.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log-message typo: 'portgrouo' -> 'portgroup'
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup.

    portgroup_ref
        The portgroup reference

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults. Blocks until the destroy task completes.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.

    Returns a list of vim.Network objects.
    Raises ArgumentValueError if parent_ref is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks live under the datacenter's networkFolder; traverse through
    # it down to the folder children
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Collect only the 'name' property of each retrieved managed object
    return [entry['name'] for entry in
            get_mors_with_properties(service_instance, vim_object, properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises VMwareApiError/VMwareRuntimeError on API faults and
    VMwareObjectRetrievalError if the manager cannot be retrieved.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # The attribute may legitimately be unset on some endpoints
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Returns the added vim.LicenseManagerLicenseInfo object.
    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # Attach the description as the label the vSphere client displays
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Returns a list of assigned license objects.
    Raises ArgumentValueError if entity_name is missing, VMwareApiError /
    VMwareRuntimeError on API faults, and VMwareObjectRetrievalError on
    unexpected assignment results.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            # The vCenter itself is identified by its instance UUID
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        # NOTE(review): assumes at least one assignment is returned here;
        # an empty result would raise IndexError — confirm against callers
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.

    Returns the assigned license object.
    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter: identified by its instance UUID
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before translating, consistent with the other exception
            # handlers in this module (was previously missing here)
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns the names of the datacenters associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    # Delegate to the generic name-listing helper
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns datacenter objects in a vCenter, optionally filtered by name.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    results = []
    entries = get_mors_with_properties(service_instance,
                                       vim.Datacenter,
                                       property_list=['name'])
    for entry in entries:
        # Keep everything when get_all_datacenters is set; otherwise keep
        # only entries whose name was explicitly requested
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            results.append(entry['object'])
    return results
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns the vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name

    Raises VMwareObjectRetrievalError if the datacenter does not exist.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name

    Returns the created vim.Datacenter object.
    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults.
    '''
    # Datacenters are created under the inventory root folder
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved

    Returns the vim.ClusterComputeResource object.
    Raises VMwareObjectRetrievalError if the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's hostFolder; traverse through
    # it down to the folder children
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        # Clusters are created under the datacenter's hostFolder
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults. Blocks until the reconfigure task completes.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns the names of the clusters associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    # Delegate to the generic name-listing helper
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns the names of the datastore clusters associated with a given
    service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    # Datastore clusters are modeled as vim.StoragePod objects
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns the names of the datastores associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # Delegate to the generic name-listing helper
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dictionary of datastores associated with a given service
    instance, mapping each datastore name to its basic information:
    name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {name: list_datastore_full(service_instance, name)
            for name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Sizes converted from bytes to MiB; usage is a percentage
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies as "'vim.HostSystem:host-123'"; extract the
        # managed object id after the colon
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get a reference to the first managed object of the specified type with
    the given name, or None when there is no match.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inv = get_inventory(si)
    view = inv.viewManager.CreateContainerView(inv.rootFolder, [obj_type], True)
    # Return the first exact name match, or None
    return next((obj for obj in view.view if obj.name == obj_name), None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get a reference to the first managed object of the specified type with
    the given managed object id, or None when there is no match.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inv = get_inventory(si)
    view = inv.viewManager.CreateContainerView(inv.rootFolder, [obj_type], True)
    # Return the first moId match, or None
    return next((obj for obj in view.view if obj._moId == obj_moid), None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults.
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Search path uses the '[datastore] path' datastore-path format
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore without the directory is not an error; skip it
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.

    Raises ArgumentValueError for unsupported reference types.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Merge the disk-derived names into the explicit name filter
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used to retrieve the storage system.

    host_ref
        The vim.HostSystem reference.

    hostname
        The host name (used for logging). Retrieved from host_ref if
        not provided.

    Raises VMwareObjectRetrievalError if the storage system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # The storage system hangs off the host's configManager
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition informations for a device path, of type
    vim.HostDiskPartitionInfo.

    storage_system
        The vim.HostStorageSystem to query.

    device_path
        The device path to retrieve partition info for.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults.
    '''
    try:
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    # Only one device path was queried, so return its single result
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec).

    Raises VMwareObjectNotFoundError if no free partition exists,
    VMwareNotFoundError if the new partition cannot be located in the
    computed layout, and VMwareApiError/VMwareRuntimeError on API faults.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use %s lazy formatting: logging does not interpolate '{0}'-style
    # placeholders, so the previous message never included the value
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a scsi disk and returns the new
    vim.Datastore reference.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDisk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved from ``host_ref`` when
        not provided.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute the spec for a new vmfs partition covering the disk's free space
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns an ESXi host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional; it is retrieved from
        ``host_ref`` when not provided.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host object straight to its datastore system
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return entries[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The removal is performed through the datastore
    system of the first host the datastore is attached to.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host can perform the removal; the first one is used
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    wanted_names = host_names or []
    properties = ['name']
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # 'parent' is required to verify cluster membership below; it
            # also implicitly checks that the cluster exists
            properties.append('parent')
    else:
        # No datacenter given: search from the inventory root folder
        start_point = get_root_folder(service_instance)
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    selected_hosts = []
    for entry in hosts:
        # A host qualifies when it belongs to the requested cluster (if
        # any) and either all hosts were requested or its name matches
        if cluster_name:
            parent = entry['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or entry['name'] in wanted_names:
            selected_hosts.append(entry['object'])
    return selected_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Each retrieval failure below is surfaced as an explicit error so
    # callers don't have to deal with None/missing attributes
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref,
                                                                name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # scsi address -> lun key
    key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        service_instance, host_ref, storage_system, hostname)
    # lun key -> lun object
    lun_by_key = {lun.key: lun
                  for lun in get_all_luns(host_ref, storage_system, hostname)}
    # Compose the two maps: scsi address -> lun object
    return {scsi_addr: lun_by_key[lun_key]
            for scsi_addr, lun_key in six.iteritems(key_by_scsi_addr)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # Nothing was requested
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # Translate the requested scsi addresses into lun keys
        key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                            storage_system,
                                                            hostname)
        disk_keys = [lun_key for scsi_addr, lun_key
                     in six.iteritems(key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    def _selected(disk):
        # A disk matches when everything was requested, its canonical name
        # was asked for, or its key maps to a requested scsi address
        if get_all_disks:
            return True
        if disk_ids and disk.canonicalName in disk_ids:
            return True
        return disk.key in disk_keys

    scsi_disks = [lun for lun in get_all_luns(host_ref, storage_system)
                  if isinstance(lun, vim.HostScsiDisk) and _selected(lun)]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk (a vim.HostDiskPartitionInfo object).

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Find the scsi disk with the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    # Retrieve the host's scsi luns through its storage system
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Find the scsi disk with the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disk
    groups in a ESXi host, filtered by the canonical names of their cache
    disks.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # Nothing was requested
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by the canonical name of its cache disk
    # (the 'ssd' member of the mapping)
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails. Returns True otherwise.
    '''
    cache_name = disk_group.ssd.canonicalName
    if cache_name != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(cache_name, cache_disk_id))
    # Compare the capacity disks regardless of ordering
    actual_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_ids = sorted(capacity_disk_ids)
    if actual_ids != expected_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_ids, expected_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # No cache manager given: traverse from the host to its cache
        # configuration manager and read the config info from there
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        # Cache manager given: read the config info off it directly
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host. Returns True on
    success.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the configuration task completes (raises on task error)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    object_type = vim.HostSystem
    return list_objects(service_instance, object_type)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean - retrieve all resource pools in the container

    return
        List of vim.ResourcePool managed object references

    raises
        VMwareObjectRetrievalError if no matching resource pool is found
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Bug fix: the message previously formatted ``selected_pools``
        # (always empty here) instead of the requested names
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    object_type = vim.ResourcePool
    return list_objects(service_instance, object_type)
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    object_type = vim.Network
    return list_objects(service_instance, object_type)
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    object_type = vim.VirtualMachine
    return list_objects(service_instance, object_type)
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    object_type = vim.Folder
    return list_objects(service_instance, object_type)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    object_type = vim.DistributedVirtualSwitch
    return list_objects(service_instance, object_type)
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    object_type = vim.VirtualApp
    return list_objects(service_instance, object_type)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    object_type = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, object_type)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed. Returns the task result on success;
    on task failure re-raises the task's error translated into a salt
    exception.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll until the task leaves the running/queued states; each poll of
    # task.info can raise, so the same fault translation is repeated
    while task_info.state == 'running' or task_info.state == 'queued':
        # Log a progress message every ``sleep_seconds`` iterations
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep up to the next whole-second boundary relative to start_time
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; raise the stored fault so it can be
        # translated into the appropriate salt exception
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    raises
        VMwareObjectRetrievalError when no VM matches; VMwareMultipleObjectsError
        when more than one VM matches the name.
    '''
    if datacenter and not parent_ref:
        # Restrict the search to the given datacenter
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set used by the salt vmware modules
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Bug fix: implicit string concatenation previously produced
        # 'with thesame name' (missing space)
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the same name, '
            'please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    raises
        VMwareObjectRetrievalError when no folder can be determined
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # For cloning: use the parent folder of the source VM
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Bug fix: previously this path fell through and crashed with an
        # UnboundLocalError on the return statement below
        raise salt.exceptions.VMwareObjectRetrievalError(
            'A datacenter, a placement folder or a base VM is required to '
            'determine the folder')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts don't expose 'resourcePool' directly;
            # traverse through the parent compute resource instead
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Bug fix: this branch previously formatted placement['host'],
            # which is not present here and raised a KeyError
            raise salt.exceptions.VMwareMultipleObjectsError(
                'Multiple instances are available of the '
                'specified resource pool {}.'.format(placement['resourcepool']))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit and returns a dict of
    the form ``{'size': <size in KB as an integer>, 'unit': 'KB'}``.

    unit
        Unit of the size: one of ``GB``, ``MB`` or ``KB`` (case
        insensitive); Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    Raises ``salt.exceptions.ArgumentValueError`` when the unit is not one
    of the supported values.
    '''
    # Multipliers from the supported units to KB
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    normalized_unit = unit.lower()
    if normalized_unit not in multipliers:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    # vCenter needs an integer (long) value
    target_size = int(size * multipliers[normalized_unit])
    return {'size': target_size, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine and waits for the task to complete.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine: ``'on'`` or ``'off'``

    Returns the ``virtual_machine`` that was passed in.

    Raises ``ArgumentValueError`` for an unsupported action,
    ``VMwareApiError``/``VMwareRuntimeError`` for API faults, and
    ``VMwarePowerOnError`` when the task fails on a missing file.
    '''
    # Select the API call and task label up front so the (previously
    # duplicated, identical) fault handling is written only once.
    if action == 'on':
        power_operation = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_operation = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_operation()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from a config spec.

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Only pass an explicit host to CreateVM_Task when the caller supplied
    # an actual vim.HostSystem placement.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file.
    On success returns the vim.VirtualMachine managed object reference.

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host
        Placement host of the virtual machine, vim.HostSystem object
    '''
    # Build the task arguments once; the host is included only when the
    # caller provided an explicit placement host.
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        return wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update

    Returns the result of waiting on the reconfiguration task.

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfigure task as complete
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine, removing it and its files from disk.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the destroy task as complete
    wait_for_task(destroy_task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory without deleting
    its files from disk.

    vm_ref
        Managed object reference of a virtual machine object

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed copy-paste from delete_vm: this function only unregisters, so
    # the log message no longer claims the VM is being destroyed.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # log.exception added for consistency with the other API wrappers
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_scsi_address_to_lun_map
|
python
|
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
'''
Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
scsi address
host_ref
The vim.HostSystem object representing the host that contains the
requested disks.
storage_system
The host's storage system. Default is None.
hostname
Name of the host. This argument is optional.
'''
if not hostname:
hostname = get_managed_object_name(host_ref)
si = get_service_instance_from_managed_object(host_ref, name=hostname)
if not storage_system:
storage_system = get_storage_system(si, host_ref, hostname)
lun_ids_to_scsi_addr_map = \
_get_scsi_address_to_lun_key_map(si, host_ref, storage_system,
hostname)
luns_to_key_map = {d.key: d for d in
get_all_luns(host_ref, storage_system, hostname)}
return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in
six.iteritems(lun_ids_to_scsi_addr_map)}
|
Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
scsi address
host_ref
The vim.HostSystem object representing the host that contains the
requested disks.
storage_system
The host's storage system. Default is None.
hostname
Name of the host. This argument is optional.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2663-L2689
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n",
"def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):\n '''\n Retrieves the service instance from a managed object.\n\n me_ref\n Reference to a managed object (of type vim.ManagedEntity).\n\n name\n Name of managed object. This field is optional.\n '''\n if not name:\n name = mo_ref.name\n log.trace('[%s] Retrieving service instance from managed object', name)\n si = vim.ServiceInstance('ServiceInstance')\n si._stub = mo_ref._stub\n return si\n",
"def get_storage_system(service_instance, host_ref, hostname=None):\n '''\n Returns a host's storage system\n '''\n\n if not hostname:\n hostname = get_managed_object_name(host_ref)\n\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n path='configManager.storageSystem',\n type=vim.HostSystem,\n skip=False)\n objs = get_mors_with_properties(service_instance,\n vim.HostStorageSystem,\n property_list=['systemFile'],\n container_ref=host_ref,\n traversal_spec=traversal_spec)\n if not objs:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Host\\'s \\'{0}\\' storage system was not retrieved'\n ''.format(hostname))\n log.trace('[%s] Retrieved storage system', hostname)\n return objs[0]['object']\n",
"def _get_scsi_address_to_lun_key_map(service_instance,\n host_ref,\n storage_system=None,\n hostname=None):\n '''\n Returns a map between the scsi addresses and the keys of all luns on an ESXi\n host.\n map[<scsi_address>] = <lun key>\n\n service_instance\n The Service Instance Object from which to obtain the hosts\n\n host_ref\n The vim.HostSystem object representing the host that contains the\n requested disks.\n\n storage_system\n The host's storage system. Default is None.\n\n hostname\n Name of the host. Default is None.\n '''\n if not hostname:\n hostname = get_managed_object_name(host_ref)\n if not storage_system:\n storage_system = get_storage_system(service_instance, host_ref,\n hostname)\n try:\n device_info = storage_system.storageDeviceInfo\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{0}'.format(exc.privilegeId))\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n if not device_info:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Host\\'s \\'{0}\\' storage device '\n 'info was not retrieved'.format(hostname))\n multipath_info = device_info.multipathInfo\n if not multipath_info:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Host\\'s \\'{0}\\' multipath info was not retrieved'\n ''.format(hostname))\n if multipath_info.lun is None:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'No luns were retrieved from host \\'{0}\\''.format(hostname))\n lun_key_by_scsi_addr = {}\n for l in multipath_info.lun:\n # The vmware scsi_address may have multiple comma separated values\n # The first one is the actual scsi address\n lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun\n for p in l.path})\n log.trace('Scsi address to lun id map on host \\'%s\\': %s',\n hostname, 
lun_key_by_scsi_addr)\n return lun_key_by_scsi_addr\n",
"def get_all_luns(host_ref, storage_system=None, hostname=None):\n '''\n Returns a list of all vim.HostScsiDisk objects in a disk\n\n host_ref\n The vim.HostSystem object representing the host that contains the\n requested disks.\n\n storage_system\n The host's storage system. Default is None.\n\n hostname\n Name of the host. This argument is optional.\n '''\n if not hostname:\n hostname = get_managed_object_name(host_ref)\n if not storage_system:\n si = get_service_instance_from_managed_object(host_ref, name=hostname)\n storage_system = get_storage_system(si, host_ref, hostname)\n if not storage_system:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Host\\'s \\'{0}\\' storage system was not retrieved'\n ''.format(hostname))\n try:\n device_info = storage_system.storageDeviceInfo\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{0}'.format(exc.privilegeId))\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n if not device_info:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Host\\'s \\'{0}\\' storage device info was not retrieved'\n ''.format(hostname))\n\n scsi_luns = device_info.scsiLun\n if scsi_luns:\n log.trace('Retrieved scsi luns in host \\'%s\\': %s',\n hostname, [l.canonicalName for l in scsi_luns])\n return scsi_luns\n log.trace('Retrieved no scsi_luns in host \\'%s\\'', hostname)\n return []\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli commmand, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary returned by ``cmdmod.run_all``, or False when the
             ``esxcli`` binary is not on the PATH
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    # NOTE(review): the password (and other values) are interpolated into a
    # shell command string; a password containing a single quote would break
    # the quoting and could allow shell injection. Consider passing an
    # argument list to cmdmod instead — TODO confirm with callers.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connect to vCenter but run the command against a specific ESXi host
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Supported mechanisms are ``userpass`` (username and password required)
    and ``sspi`` (principal and domain required; a Kerberos token is
    negotiated via ``get_gssapi_token``).

    Raises ``CommandExecutionError`` for missing/unsupported credentials and
    ``VMwareConnectionError`` when the connection cannot be established.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Older pyVmomi builds do not accept the b64token/mechanism kwargs
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First fallback: retry with certificate verification disabled
            if (isinstance(exc, vim.fault.HostConnectFault) and
                    '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
                    '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Second fallback: force TLSv1 with verification disabled
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is closed when the process exits
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Use a dedicated result variable instead of rebinding (and shadowing)
    # the 'customization_spec_name' parameter, as the previous code did.
    customization_spec = si.content.customizationSpecManager.GetCustomizationSpec(
        name=customization_spec_name)
    return customization_spec
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # First object whose name matches, or None when there is no match
    return next((entry for entry in container.view if entry.name == obj_name),
                None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # Reuse pyVim's process-wide cached service instance when it exists and
    # still points at the same host:port endpoint.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and log in again from scratch
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    # Reuse the authenticated session: extract the quoted cookie value from
    # the existing stub and advertise it in the request context, then copy
    # the full cookie onto the new stub.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only for logging. This field is
        optional.
    '''
    # NOTE(review): the default '<unnamed>' is truthy, so this branch only
    # fires when a caller explicitly passes a falsy name (e.g. None).
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # A ServiceInstance sharing the managed object's SOAP stub talks to the
    # same endpoint/session as the managed object itself.
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Returns True when the connection is made to a vCenter Server and False
    when it is made to an ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' -> vCenter, 'HostAgent' -> ESXi; anything else is an
    # unexpected endpoint type.
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Returns the endpoint's ``content.about`` info object.

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no switch with that name exists
    '''
    # Bail out early when no switch with the requested name exists at all
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    return next((dvs for dvs in container.view if dvs.name == dvs_name), None)
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Returns the base64-encoded token.

    Raises ``ImportError`` when the gssapi library is unavailable and
    ``CommandExecutionError`` when no token could be negotiated.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    # Fixed typo in the debug message: 'gsspi' -> 'gssapi'
    log.debug('Retrieving gssapi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    # in_token stays None: this client-side exchange only produces the
    # initial token, which is returned on the first successful step.
    in_token = None
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only direct ESXi (HostAgent) connections expose the hardware details
    # below; for any other apiType an empty dict is returned.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the first (and, for a HostAgent connection,
            # presumably the only) HostSystem -- TODO confirm.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; convert to MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # VMkernel NICs: collect IPv4 (always present in the spec), IPv6
            # (only when configured) and MAC per device.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host + '.' + domain, but only joined with a dot when a
            # domain is actually configured.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NICs only contribute MAC addresses.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the reference to the container view.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the full content (inventory) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises VMwareApiError on permission or generic API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    # Translate pyVmomi faults into salt exception types, most specific first.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties used to return even more
        filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the
    # filter is the container_ref passed in the function
    obj_ref = container_ref
    # Tracks whether we created a container view here and therefore own its
    # cleanup (Destroy) at the end.
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose ``property_name`` equals
    ``property_value``, or whose moid string equals ``property_value``.
    Returns None when nothing matches.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match against.

    property_name
        An object property used to return the specified object reference
        results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.
    '''
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # The moid comparison allows callers to pass a managed object id
        # (e.g. 'datacenter-21') instead of the property value.
        moid = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == moid:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for
    the managed object. Each entry is a dict mapping property name to value,
    plus an ``object`` key holding the managed object reference itself.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more
        filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    # A stale HTTP connection can surface as BadStatusLine or a broken pipe;
    # retry the call exactly once in those cases.
    except BadStatusLine:
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)

    # Flatten each result's propSet into a plain dict, keeping the managed
    # object reference under the 'object' key.
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved optimally
    (local properties only, no traversal).

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises VMwareApiError if the properties could not be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # The name is fetched separately, purely for log/error messages; fall
    # back to a placeholder when the object has no 'name' property.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Return the ``name`` property of a managed object.

    If the object has no name, None is returned.

    mo_ref
        The managed object reference.
    '''
    retrieved = get_properties_of_managed_object(mo_ref, ['name'])
    return retrieved.get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device object for the given type
    name.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e'.

    Raises ValueError for any other type name.
    '''
    if adapter_type == 'vmxnet':
        return vim.vm.device.VirtualVmxnet()
    elif adapter_type == 'vmxnet2':
        return vim.vm.device.VirtualVmxnet2()
    elif adapter_type == 'vmxnet3':
        return vim.vm.device.VirtualVmxnet3()
    elif adapter_type == 'e1000':
        return vim.vm.device.VirtualE1000()
    elif adapter_type == 'e1000e':
        return vim.vm.device.VirtualE1000e()

    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Return the type name ('vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e')
    for a virtual network adapter device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ValueError for unrecognized device objects.
    '''
    # The check order is preserved from the original implementation: the
    # vmxnet2/vmxnet3 variants are tested before the plain VirtualVmxnet type.
    ordered_checks = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for device_type, type_name in ordered_checks:
        if isinstance(adapter_object, device_type):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Return the distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs.
    folder_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    entries = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualSwitch,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    dvss = []
    for entry in entries:
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises VMwareObjectRetrievalError when the folder cannot be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Single-hop traversal: datacenter -> networkFolder.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Build a minimal spec when none is given; the name in the spec is always
    # forced to dvs_name.
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the vCenter task finishes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and waits for
    the reconfiguration task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether network I/O control (NIOC) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).

    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises ArgumentValueError when the parent is neither a datacenter nor a
    DVS.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    # The traversal path depends on the parent type: a datacenter requires
    # two hops (networkFolder -> childEntity), a DVS only one (portgroup).
    if isinstance(parent_ref, vim.Datacenter):
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference

    Raises VMwareObjectRetrievalError when no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # Uplink portgroups are identified by the system tag SYSTEM/DVS.UPLINKPG.
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.

    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.

    Raises ArgumentValueError when the parent is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to reach networks.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']

    # Idiomatic comprehension instead of a manual append loop; behavior is
    # unchanged -- only the 'name' property of each entry is returned.
    return [item['name'] for item in
            get_mors_with_properties(service_instance, vim_object,
                                     properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises VMwareObjectRetrievalError when the assignment manager is not
    available, VMwareApiError on permission/API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label the vSphere client displays.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ArgumentValueError when entity_name is missing,
    VMwareObjectRetrievalError on inconsistent assignment results,
    VMwareApiError on permission/API faults and VMwareRuntimeError on vmodl
    runtime faults.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # A vCenter (uuid) query must yield exactly one assignment.
    if entity_type == 'uuid' and len(assignments) > 1:
        # Fixed typo in the log message ('Unexpectectedly' -> 'Unexpectedly').
        log.trace('Unexpectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')

    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Consistency fix: log the fault before re-raising, like every
            # other fault handler in this module.
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Lists the datacenters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    # Delegate to the generic object lister, restricted to Datacenter objects.
    datacenters = list_objects(service_instance, vim.Datacenter)
    return datacenters
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter, optionally filtered by name.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.

    Returns a (possibly empty) list of vim.Datacenter managed objects. If
    neither filter is supplied, the result is empty.
    '''
    # Only the 'name' property is fetched; filtering happens client-side.
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name

    Raises a VMwareObjectRetrievalError when no datacenter with that name
    exists in the inventory.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if matches:
        return matches[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name

    Returns the newly created vim.Datacenter object. Raises
    VMwareApiError/VMwareRuntimeError on API faults.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved

    Returns the vim.ClusterComputeResource object; raises
    VMwareObjectRetrievalError if the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Traverse datacenter -> hostFolder -> childEntity to reach the clusters
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx). Required.

    Raises VMwareApiError/VMwareRuntimeError on API faults.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter and waits for the reconfiguration
    task to complete.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx). Required.

    Raises VMwareApiError/VMwareRuntimeError on API faults.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Lists the clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    # Delegate to the generic object lister, restricted to cluster objects.
    clusters = list_objects(service_instance, vim.ClusterComputeResource)
    return clusters
def list_datastore_clusters(service_instance):
    '''
    Lists the datastore clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    # Datastore clusters are modeled as StoragePod objects in the vSphere API.
    datastore_clusters = list_objects(service_instance, vim.StoragePod)
    return datastore_clusters
def list_datastores(service_instance):
    '''
    Lists the datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # Delegate to the generic object lister, restricted to Datastore objects.
    datastores = list_objects(service_instance, vim.Datastore)
    return datastores
def list_datastores_full(service_instance):
    '''
    Returns a mapping of datastore name to its basic information:
    name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # One detail lookup per datastore returned by the generic lister.
    return {
        ds_name: list_datastore_full(service_instance, ds_name)
        for ds_name in list_objects(service_instance, vim.Datastore)
    }
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Capacity/free/used are reported in MiB; usage is a percentage.
    Raises VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    # Quotes are stripped from string values for cleaner output
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Convert bytes to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key renders as "'vim.HostSystem:host-123'"; keep the moid part
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get a reference to the first object of the specified type with the
    specified name, or None when no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # First match wins; None when nothing in the view matches.
    return next(
        (candidate for candidate in container.view
         if candidate.name == obj_name),
        None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get a reference to the first object of the specified type with the
    specified managed object id, or None when no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # First match wins; None when nothing in the view matches.
    return next(
        (candidate for candidate in container.view
         if candidate._moId == obj_moid),
        None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Search each datastore under '[<datastore>] <directory>'
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore without the directory simply contributes no results
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible. Supported
        types: vim.HostSystem, vim.ClusterComputeResource, vim.Datacenter,
        vim.StoragePod and the root 'Datacenters' folder.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.

    Raises ArgumentValueError for unsupported reference types.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        # Translate backing disk ids into datastore names via the host's
        # mounted VMFS volumes, then merge those names into the name filter.
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Build a traversal spec appropriate for the reference type; the default
    # 'Traverse All' spec does not reach the datastores in all cases.
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises VMwareApiError/VMwareRuntimeError on API faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used for the property collector query.

    host_ref
        Reference to the ESXi host whose storage system is retrieved.

    hostname
        Name of the host, used only for logging/errors; looked up if omitted.

    Raises VMwareObjectRetrievalError if the storage system is not found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Follow the host's configManager.storageSystem property to the object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo.

    storage_system
        The host's vim.HostStorageSystem object.

    device_path
        The device path whose partition layout is queried.

    Raises VMwareApiError/VMwareRuntimeError on API faults.
    '''
    try:
        # The API accepts a list of paths; we query exactly one device
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec).

    storage_system
        The host's vim.HostStorageSystem object.

    device_path
        Path of the device on which the partition is added.

    partition_info
        The device's current vim.HostDiskPartitionInfo.

    Raises VMwareObjectNotFoundError if the disk has no free partition,
    VMwareNotFoundError if the computed layout lacks the new partition, and
    VMwareApiError/VMwareRuntimeError on API faults.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use %s (printf-style) lazy formatting: logging does not interpolate
    # str.format-style '{0}' placeholders, so the value was never rendered
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem; retrieved if not provided.
        Default is None.

    Returns the created vim.Datastore reference. Raises
    VMwareApiError/VMwareRuntimeError on API faults.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a spec that consumes the disk's remaining free space
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises VMwareObjectRetrievalError if the datastore system is not found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Follow the host's configManager.datastoreSystem property to the object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes (unmounts and deletes) a datastore.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises VMwareApiError if no host is attached to the datastore or on API
    faults, VMwareRuntimeError on runtime faults.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Removal is performed through the datastore system of any attached host
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.

    Raises ArgumentValueError if cluster_name is given without
    datacenter_name.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.

        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises VMwareObjectRetrievalError when the storage device info, multipath
    info or luns cannot be retrieved; VMwareApiError/VMwareRuntimeError on
    API faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    # Use a falsy check: an unset attribute AND an empty lun list both mean
    # no luns were retrieved (the 'is None' test let empty lists through and
    # silently returned an empty map)
    if not multipath_info.lun:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    Returns an empty list when the host reports no scsi luns. Raises
    VMwareObjectRetrievalError when storage info cannot be retrieved;
    VMwareApiError/VMwareRuntimeError on API faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.

    Returns an empty list when neither a filter nor get_all_disks is given.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk (vim.HostDiskPartitionInfo).

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError when the devices or the disk cannot be
    found.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the target disk among the host's scsi luns by canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError when devices or the disk cannot be
    found; VMwareApiError/VMwareRuntimeError on API faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the target disk among the host's scsi luns by canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing the disk
    groups in an ESXi host, filtered by the canonical names of their cache
    disks.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disk groups in the host.
        Default value is False.

    Raises VMwareApiError/VMwareRuntimeError on vCenter API faults and
    VMwareObjectRetrievalError when the host has no vSAN host/storage config.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # Nothing to filter on -> nothing can match
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # Each mapping has exactly one cache (ssd) disk, so matching on its
    # canonical name uniquely identifies the disk group
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Validates that a disk group is composed of the expected cache disk and
    capacity disks.

    Raises ArgumentValueError when either the cache disk or the capacity
    disk set doesn't match; returns True otherwise.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # Compare as sorted lists: order doesn't matter, multiplicity does
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids,
                      expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, otherwise returns None.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # No manager supplied: traverse from the host to its cache
        # configuration manager and read the property from there
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first datastore's cache config is returned
        # (multiple-datastore host caches are not supported yet)
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method.

    Returns True on success; raises VMwareObjectRetrievalError when the host
    has no cache configuration manager and VMwareApiError/VMwareRuntimeError
    on vCenter API faults.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the configuration task as finished
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects.

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; when True, all resource pools in the container are returned
        regardless of ``resource_pool_names``

    return
        List of resource pool managed object references

    Raises VMwareObjectRetrievalError if no matching pool was found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Scope the search to the datacenter when one was given, otherwise
    # search the whole inventory from the root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = [pool['object'] for pool in resource_pools
                      if get_all_resource_pools or
                      pool['name'] in resource_pool_names]
    if not selected_pools:
        # Report the requested names; the previous code mistakenly formatted
        # the (always empty at this point) result list into the message
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed and returns its result; re-raises the
    task's error (translated to a salt VMware exception) if it failed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only log once every sleep_seconds elapsed seconds to limit spam
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep up to the next whole-second boundary relative to start_time
        # so that time_counter tracks elapsed wall-clock seconds
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        # task.info is re-fetched each iteration; the polling call itself can
        # fault, so it is guarded the same way as the initial fetch
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; raise/re-catch its error to translate it
        # into the corresponding salt VMware exception
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first fault message detail, when present
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError when no VM with the given name exists
    and VMwareMultipleObjectsError when the name is ambiguous.
    '''
    if datacenter and not parent_ref:
        # Scope the search to the given datacenter
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Fixed: implicit adjacent-literal concatenation previously produced
        # the message '... with thesame name ...' (missing space)
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the same name, '
            'please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises VMwareObjectRetrievalError when no folder can be determined.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Cloning: place the new VM in the same folder as the base VM
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously fell through and raised UnboundLocalError on the return;
        # raise an explicit, descriptive error instead
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The folder could not be determined: no base VM, folder '
            'placement or datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose resourcePool directly; clustered hosts
            # don't, so traverse host -> parent cluster -> resourcePool
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: this error previously formatted placement['host'] (a
            # KeyError in this branch) and called the object a "host"
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit; returns a dictionary
    of the form ``{'size': <int>, 'unit': 'KB'}`` (vCenter expects an
    integer/long value).

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB.
        Matched case-insensitively; one of GB, MB or KB.

    size
        Number which represents the size

    Raises ArgumentValueError when the unit is not one of GB, MB or KB.
    '''
    if unit.lower() == 'gb':
        # vCenter needs long value
        target_size = int(size * 1024 * 1024)
    elif unit.lower() == 'mb':
        target_size = int(size * 1024)
    elif unit.lower() == 'kb':
        target_size = int(size)
    else:
        # Fixed message: the unit *was* specified, it is just unsupported
        raise salt.exceptions.ArgumentValueError(
            'The unit \'{0}\' is not supported'.format(unit))
    return {'size': target_size, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers a virtual machine on or off and waits for the operation to
    finish; returns the virtual machine object.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    # Resolve the requested action up front instead of duplicating the
    # fault-handling ladder per branch
    if action == 'on':
        power_method = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_method = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_method()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec.

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    try:
        # Only pass the host when a valid vim.HostSystem was supplied;
        # otherwise let vCenter choose the host
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Creation can be slow: poll every 10s and log at info level
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference.

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)
    '''
    try:
        # The host argument is only passed when explicitly provided
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # A missing vmx file surfaces as FileNotFound from the task
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update

    Returns the result of the reconfigure task.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine (removes it from the inventory and deletes
    its files from the datastore).

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the destroy task completes
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters a virtual machine (removes it from the inventory without
    deleting its files from the datastore).

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed copy-paste from delete_vm: this unregisters, it does not destroy
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log the fault before translating it, like every sibling function
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_disks
|
python
|
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
get_all_disks=False):
'''
Returns a list of vim.HostScsiDisk objects representing disks
in a ESXi host, filtered by their cannonical names and scsi_addresses
host_ref
The vim.HostSystem object representing the host that contains the
requested disks.
disk_ids
The list of canonical names of the disks to be retrieved. Default value
is None
scsi_addresses
The list of scsi addresses of the disks to be retrieved. Default value
is None
get_all_disks
Specifies whether to retrieve all disks in the host.
Default value is False.
'''
hostname = get_managed_object_name(host_ref)
if get_all_disks:
log.trace('Retrieving all disks in host \'%s\'', hostname)
else:
log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
'addresses = (%s)', hostname, disk_ids, scsi_addresses)
if not (disk_ids or scsi_addresses):
return []
si = get_service_instance_from_managed_object(host_ref, name=hostname)
storage_system = get_storage_system(si, host_ref, hostname)
disk_keys = []
if scsi_addresses:
# convert the scsi addresses to disk keys
lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
storage_system,
hostname)
disk_keys = [key for scsi_addr, key
in six.iteritems(lun_key_by_scsi_addr)
if scsi_addr in scsi_addresses]
log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
scsi_luns = get_all_luns(host_ref, storage_system)
scsi_disks = [disk for disk in scsi_luns
if isinstance(disk, vim.HostScsiDisk) and (
get_all_disks or
# Filter by canonical name
(disk_ids and (disk.canonicalName in disk_ids)) or
# Filter by disk keys from scsi addresses
(disk.key in disk_keys))]
log.trace('Retrieved disks in host \'%s\': %s',
hostname, [d.canonicalName for d in scsi_disks])
return scsi_disks
|
Returns a list of vim.HostScsiDisk objects representing disks
in a ESXi host, filtered by their cannonical names and scsi_addresses
host_ref
The vim.HostSystem object representing the host that contains the
requested disks.
disk_ids
The list of canonical names of the disks to be retrieved. Default value
is None
scsi_addresses
The list of scsi addresses of the disks to be retrieved. Default value
is None
get_all_disks
Specifies whether to retrieve all disks in the host.
Default value is False.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2692-L2745
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n",
"def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):\n '''\n Retrieves the service instance from a managed object.\n\n me_ref\n Reference to a managed object (of type vim.ManagedEntity).\n\n name\n Name of managed object. This field is optional.\n '''\n if not name:\n name = mo_ref.name\n log.trace('[%s] Retrieving service instance from managed object', name)\n si = vim.ServiceInstance('ServiceInstance')\n si._stub = mo_ref._stub\n return si\n",
"def get_storage_system(service_instance, host_ref, hostname=None):\n '''\n Returns a host's storage system\n '''\n\n if not hostname:\n hostname = get_managed_object_name(host_ref)\n\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n path='configManager.storageSystem',\n type=vim.HostSystem,\n skip=False)\n objs = get_mors_with_properties(service_instance,\n vim.HostStorageSystem,\n property_list=['systemFile'],\n container_ref=host_ref,\n traversal_spec=traversal_spec)\n if not objs:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Host\\'s \\'{0}\\' storage system was not retrieved'\n ''.format(hostname))\n log.trace('[%s] Retrieved storage system', hostname)\n return objs[0]['object']\n",
"def _get_scsi_address_to_lun_key_map(service_instance,\n host_ref,\n storage_system=None,\n hostname=None):\n '''\n Returns a map between the scsi addresses and the keys of all luns on an ESXi\n host.\n map[<scsi_address>] = <lun key>\n\n service_instance\n The Service Instance Object from which to obtain the hosts\n\n host_ref\n The vim.HostSystem object representing the host that contains the\n requested disks.\n\n storage_system\n The host's storage system. Default is None.\n\n hostname\n Name of the host. Default is None.\n '''\n if not hostname:\n hostname = get_managed_object_name(host_ref)\n if not storage_system:\n storage_system = get_storage_system(service_instance, host_ref,\n hostname)\n try:\n device_info = storage_system.storageDeviceInfo\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{0}'.format(exc.privilegeId))\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n if not device_info:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Host\\'s \\'{0}\\' storage device '\n 'info was not retrieved'.format(hostname))\n multipath_info = device_info.multipathInfo\n if not multipath_info:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Host\\'s \\'{0}\\' multipath info was not retrieved'\n ''.format(hostname))\n if multipath_info.lun is None:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'No luns were retrieved from host \\'{0}\\''.format(hostname))\n lun_key_by_scsi_addr = {}\n for l in multipath_info.lun:\n # The vmware scsi_address may have multiple comma separated values\n # The first one is the actual scsi address\n lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun\n for p in l.path})\n log.trace('Scsi address to lun id map on host \\'%s\\': %s',\n hostname, 
lun_key_by_scsi_addr)\n return lun_key_by_scsi_addr\n",
"def get_all_luns(host_ref, storage_system=None, hostname=None):\n '''\n Returns a list of all vim.HostScsiDisk objects in a disk\n\n host_ref\n The vim.HostSystem object representing the host that contains the\n requested disks.\n\n storage_system\n The host's storage system. Default is None.\n\n hostname\n Name of the host. This argument is optional.\n '''\n if not hostname:\n hostname = get_managed_object_name(host_ref)\n if not storage_system:\n si = get_service_instance_from_managed_object(host_ref, name=hostname)\n storage_system = get_storage_system(si, host_ref, hostname)\n if not storage_system:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Host\\'s \\'{0}\\' storage system was not retrieved'\n ''.format(hostname))\n try:\n device_info = storage_system.storageDeviceInfo\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{0}'.format(exc.privilegeId))\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n if not device_info:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Host\\'s \\'{0}\\' storage device info was not retrieved'\n ''.format(hostname))\n\n scsi_luns = device_info.scsiLun\n if scsi_luns:\n log.trace('Retrieved scsi luns in host \\'%s\\': %s',\n hostname, [l.canonicalName for l in scsi_luns])\n return scsi_luns\n log.trace('Retrieved no scsi_luns in host \\'%s\\'', hostname)\n return []\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Gate module loading on the availability of the pyVmomi library.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param cmd: esxcli command and arguments
    :param protocol: Connection protocol; defaults to 'https'
    :param port: TCP port; defaults to 443
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: Dictionary as produced by ``cmdmod.run_all`` (retcode, stdout,
             stderr, ...), or False when the esxcli binary is not installed
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting through a vCenter: -h selects the managed ESXi host.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    # NOTE(review): the password is interpolated into the command line; the
    # quiet output_loglevel keeps it out of the minion logs, but it is still
    # visible in the process list while the command runs.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    mechanism must be either 'userpass' (username/password login) or 'sspi'
    (Kerberos: a GSSAPI token is built from principal/host/domain and passed
    to SmartConnect as b64token).
    '''
    log.trace('Retrieving new service instance')
    token = None
    # Validate the credentials required by the chosen mechanism up front so
    # we fail with a clear error before attempting a connection.
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Old pyVmomi releases don't accept the b64token/mechanism kwargs.
        # NOTE(review): exc.message is a Python 2 idiom; on Python 3 this
        # attribute access raises AttributeError — confirm intended support.
        # NOTE(review): when the message does not match, the exception is
        # swallowed and service_instance stays unbound, so the
        # atexit.register call below fails with NameError.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First fallback: when the failure is an SSL certificate
            # verification error, retry with verification disabled.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Second fallback: force TLSv1 with verification disabled.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is torn down when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Return the customization spec item with the given name, for use when
    customizing a cloned VM.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Return the managed object of ``obj_type`` whose name matches ``obj_name``,
    or None when no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    # First object in the view whose name matches, else None.
    return next((entry for entry in container.view if entry.name == obj_name),
                None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    A cached service instance is reused when it targets the same host:port;
    stale sessions are detected via a CurrentTime() probe and reconnected.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # pyVim caches the last connection; try to reuse it.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            # Cached instance matches this host:port — returned without the
            # CurrentTime() liveness probe below.
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    # Reuse the authenticated session: pull the session cookie out of the
    # existing stub's cookie header and propagate it to the new stub.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only in the trace log. Optional.
        Note: the name is read from the managed object itself only when a
        falsy value (e.g. None or '') is passed explicitly — the default
        placeholder '<unnamed>' is truthy and is used as-is.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a new ServiceInstance that reuses the managed object's existing
    # SOAP stub (and therefore its authenticated session).
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    pyVmomi faults are translated into salt exceptions; the more specific
    NoPermission fault is handled before the generic VimFault.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises VMwareApiError on permission/API faults or when the endpoint
    reports an unexpected api type.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' identifies a vCenter Server, 'HostAgent' a standalone
    # ESXi host; anything else is unexpected and treated as an API error.
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Returns the endpoint's ``about`` info object; pyVmomi faults are
    translated into salt exceptions.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object, or None when
    no DVS with that name exists.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object
    '''
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    view = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    return next((dvs for dvs in view.view if dvs.name == dvs_name), None)
def _get_pnics(host_reference):
    '''
    Return the physical NIC (pnic) list from the given host's network config.
    '''
    network_config = host_reference.config.network
    return network_config.pnic
def _get_vnics(host_reference):
    '''
    Return the virtual NIC (vnic) list from the given host's network config.
    '''
    network_config = host_reference.config.network
    return network_config.vnic
def _get_vnic_manager(host_reference):
    '''
    Return the host's virtualNicManager from its config manager.
    '''
    config_manager = host_reference.configManager
    return config_manager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
    '''
    Return the portgroup object named ``portgroup_name`` on the given DVS,
    or None when no portgroup has that name.

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object
    '''
    return next(
        (pg for pg in dvs.portgroup if pg.name == portgroup_name),
        None)
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object

    NOTE(review): this helper is byte-for-byte identical to
    ``_get_dvs_portgroup`` — it scans every portgroup on the DVS, not only
    the uplink portgroups. Confirm whether it should look at the DVS's
    uplink portgroup list instead.
    '''
    for portgroup in dvs.portgroup:
        if portgroup.name == portgroup_name:
            return portgroup
    return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    Returns the base64-encoded token; raises CommandExecutionError when the
    security context yields no token.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # Step the context; the first out_token produced is returned
        # immediately, base64-encoded (bytes on PY2, re-encoded on PY3).
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): in_token is never reassigned in this loop, so a step
        # that yields no token raises here on the first iteration.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only direct ESXi connections (apiType 'HostAgent') expose a single host
    # to read hardware info from; vCenter connections return an empty dict.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # System / BIOS information
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            # Product ("OS") information from the host summary
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            # Network interfaces: vnics carry IP information, pnics only MACs
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            # Hostname / domain / fqdn from the host's DNS configuration
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the reference to the container view
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the full service content (inventory) of a Service Instance.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    pyVmomi faults are translated into salt exceptions.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.

    pyVmomi faults raised while creating the container view, retrieving the
    contents, or destroying the view are translated into salt exceptions.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view (only if this function created it above)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose ``property_name``
    property (or stringified object id) equals ``property_value``; None when
    nothing matches.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    candidates = get_mors_with_properties(
        service_instance,
        object_type,
        property_list=[property_name],
        container_ref=container_ref)
    for entry in candidates:
        # Also match against the stringified MOR id (quotes stripped).
        mor_id = six.text_type(entry.get('object', '')).strip('\'"')
        if property_value in (entry[property_name], mor_id):
            return entry['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Retrieve managed object references of ``object_type`` together with the
    requested properties.

    Returns a list of dicts, one per object, mapping each retrieved property
    name to its value, plus an ``'object'`` key holding the managed object
    reference itself.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to filter the result.

    container_ref
        Optional managed object to search under (Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem). Defaults to the
        inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''
    call_args = [service_instance, object_type]
    call_kwargs = {'property_list': property_list,
                   'container_ref': container_ref,
                   'traversal_spec': traversal_spec,
                   'local_properties': local_properties}
    try:
        content = get_content(*call_args, **call_kwargs)
    except BadStatusLine:
        # The server dropped the HTTP connection; retry once.
        content = get_content(*call_args, **call_kwargs)
    except IOError as exc:
        # A broken pipe is likewise retried once; anything else propagates.
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*call_args, **call_kwargs)

    object_list = []
    for obj in content:
        entry = {prop.name: prop.val for prop in obj.propSet}
        entry['object'] = obj.obj
        object_list.append(entry)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises VMwareApiError when the properties couldn't be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First fetch the object's name (used only for log/error messages);
    # fall back to a placeholder when the object has no 'name' property.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Return the ``name`` property of a managed object, or None when the
    object has no name.

    mo_ref
        The managed object reference.
    '''
    properties = get_properties_of_managed_object(mo_ref, ['name'])
    return properties.get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device object for the given
    adapter type string.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e'; any other
        value raises ValueError.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type not in adapter_classes:
        raise ValueError('An unknown network adapter object type name.')
    return adapter_classes[adapter_type]()
def get_network_adapter_object_type(adapter_object):
    '''
    Return the type string of a virtual network adapter device object.

    adapter_object
        The adapter device object whose type string is returned; an
        unrecognized object raises ValueError.
    '''
    # Order mirrors the original checks: vmxnet2/vmxnet3 are matched before
    # the more general vmxnet, and e1000e before e1000.
    type_checks = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for adapter_class, type_name in type_checks:
        if isinstance(adapter_object, adapter_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Return the distributed virtual switches (DVSs) of a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # DVSs live under the datacenter's network folder; traverse
    # networkFolder -> childEntity to reach them.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    entries = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualSwitch,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    results = []
    for entry in entries:
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            results.append(entry['object'])
    return results
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter

    dc_ref
        The datacenter reference whose network folder is returned.

    Raises VMwareObjectRetrievalError when the folder can't be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Traverse only the datacenter's 'networkFolder' property.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the create task to complete. Note: nothing is returned.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a spec named ``dvs_name`` is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    # Only when no config spec was supplied is one created and named here.
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and blocks
    until the reconfigure task completes.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether network I/O control (NIOC) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises ArgumentValueError if the parent is neither a datacenter nor a
    distributed virtual switch.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    if isinstance(parent_ref, vim.Datacenter):
        # Portgroups live under the datacenter's network folder, so
        # traverse networkFolder -> childEntity
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference.

    Raises VMwareObjectRetrievalError if no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by the 'SYSTEM/DVS.UPLINKPG' tag
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and blocks until the creation task completes.

    dvs_ref
        The dvs reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec).

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and blocks until the
    reconfigure task completes.

    portgroup_ref
        The portgroup reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec).

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in trace message ('portgrouo' -> 'portgroup')
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and blocks until the destroy
    task completes.

    portgroup_ref
        The portgroup reference.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.

    Raises ArgumentValueError if the parent is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks live under the datacenter's network folder, so traverse
    # networkFolder -> childEntity
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names of a given type retrieved from a
    service instance.

    service_instance
        The Service Instance from which to obtain the objects.

    vim_object
        The vim type of the objects to list.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``['name']``.
    '''
    props = ['name'] if properties is None else properties
    entries = get_mors_with_properties(service_instance, vim_object, props)
    return [entry['name'] for entry in entries]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises VMwareObjectRetrievalError if no assignment manager is available,
    VMwareApiError on permission/API faults and VMwareRuntimeError on
    runtime faults.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
                service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license and returns the added vim.LicenseManagerLicenseInfo.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add; stored as the
        'VpxClientLicenseLabel' label of the license.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ArgumentValueError when entity_name is missing,
    VMwareObjectRetrievalError when the assignment lookup is ambiguous or
    inconsistent, VMwareApiError on permission/API faults and
    VMwareRuntimeError on runtime faults.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            # The vCenter itself is identified by its instance UUID
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
                license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid lookup) is expected to have exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the resulting
    vim.LicenseManagerLicenseInfo.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log the fault before re-raising, consistent with the other
            # exception handlers in this module
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Lists the names of the datacenters of a given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns the vim.Datacenter objects of a vCenter, optionally filtered
    by name.

    service_instance
        The Service Instance Object used for the retrieval.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters regardless of
        the name filter. Default value is False.
    '''
    entries = get_mors_with_properties(service_instance,
                                       vim.Datacenter,
                                       property_list=['name'])
    matches = []
    for entry in entries:
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            matches.append(entry['object'])
    return matches
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns the vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name.

    Raises VMwareObjectRetrievalError when no datacenter matches.
    '''
    found = get_datacenters(service_instance,
                            datacenter_names=[datacenter_name])
    if found:
        return found[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder and returns the new
    vim.Datacenter object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster (vim.ClusterComputeResource) in a datacenter.

    dc_ref
        The datacenter reference.

    cluster
        The name of the cluster to be retrieved.

    Raises VMwareObjectRetrievalError if the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's host folder, so traverse
    # hostFolder -> childEntity
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster and blocks until the reconfigure task completes.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx) to apply; only the
        fields set in the spec are modified (modify=True).

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Lists the names of the clusters of a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Lists the names of the datastore clusters of a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Lists the names of the datastores of a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dictionary mapping each datastore name of a given service
    instance to its basic information:
        name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {name: list_datastore_full(service_instance, name)
            for name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    Capacity/free/used are expressed in MiB; usage is a percentage.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Summary sizes are reported in bytes; convert to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against zero-capacity datastores (e.g. inaccessible/unmounted)
    # which would otherwise raise ZeroDivisionError
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key renders as '<type>:<moid>'; extract the moid part
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Returns the reference to the first object of the specified type with a
    matching name, or None if no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # isn't leaked on the vCenter/ESXi side
        container.Destroy()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Returns the reference to the first object of the specified type with a
    matching managed object id, or None if no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # isn't leaked on the vCenter/ESXi side
        container.Destroy()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects; datastores
        on which the directory is missing are silently skipped

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Best-effort: the directory may not exist on every datastore
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible. Supported
        types: vim.HostSystem, vim.ClusterComputeResource, vim.Datacenter,
        vim.StoragePod and the root 'Datacenters' vim.Folder.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.

    Raises ArgumentValueError for unsupported reference types or when a
    backing disk filter is used with a non-host reference.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Merge the disk-derived names into the explicit name filter
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used for the retrieval.

    host_ref
        The vim.HostSystem whose storage system is retrieved.

    hostname
        Host name used in logging/errors; looked up from host_ref when
        not provided.

    Raises VMwareObjectRetrievalError if the storage system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo.

    storage_system
        The vim.HostStorageSystem used to query the partition info.

    device_path
        The device path to retrieve the partition info for.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    try:
        partition_infos = \
                storage_system.RetrieveDiskPartitionInfo(
                    devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The host's vim.HostStorageSystem used to compute the partition info.

    device_path
        Path of the disk device on which the new partition is created.

    partition_info
        vim.HostDiskPartitionInfo describing the current layout of the disk.

    Raises VMwareObjectNotFoundError when the disk has no free partition,
    VMwareNotFoundError when the newly computed partition cannot be
    identified, VMwareApiError/VMwareRuntimeError on API faults.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition at the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use lazy %-style formatting; the previous '{0}' placeholder was never
    # substituted by the logging module and the literal '{0}' was logged
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved from the host if not
        provided. This argument is optional.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute the spec for a new vmfs partition that fills the free space at
    # the end of the disk
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Retrieves the datastore system (vim.HostDatastoreSystem) of an ESXi host.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host; looked up from ``host_ref`` when not provided.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host object to its configManager.datastoreSystem
    spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return results[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore by unmounting it through the datastore system of the
    first host it is attached to.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises VMwareApiError when the datastore has no attached hosts or on API
    faults, VMwareRuntimeError on vmodl runtime faults.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Removal is performed via the datastore system of the first attached host
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    properties = ['name']
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # The parent property is required below to verify cluster
            # membership of each host
            properties.append('parent')
    else:
        # No datacenter given: search from the inventory root folder
        container_ref = get_root_folder(service_instance)

    candidates = get_mors_with_properties(service_instance,
                                          vim.HostSystem,
                                          container_ref=container_ref,
                                          property_list=properties)
    log.trace('Retrieved hosts: %s', [c['name'] for c in candidates])

    selected_hosts = []
    for candidate in candidates:
        if cluster_name:
            # Keep only hosts whose parent is the requested cluster
            parent = candidate['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or candidate['name'] in host_names:
            selected_hosts.append(candidate['object'])
    return selected_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises VMwareObjectRetrievalError when the storage device info, multipath
    info or luns cannot be retrieved; VMwareApiError/VMwareRuntimeError on
    API faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    Returns an empty list when the host exposes no scsi luns. Raises
    VMwareObjectRetrievalError when the storage system or device info cannot
    be retrieved; VMwareApiError/VMwareRuntimeError on API faults.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(
        host_ref, name=hostname)
    storage_system = storage_system or get_storage_system(
        service_instance, host_ref, hostname)
    # scsi address -> lun key
    key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        service_instance, host_ref, storage_system, hostname)
    # lun key -> lun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Join the two maps: scsi address -> lun object
    scsi_addr_to_lun = {}
    for scsi_addr, lun_key in six.iteritems(key_by_scsi_addr):
        scsi_addr_to_lun[scsi_addr] = lun_by_key[lun_key]
    return scsi_addr_to_lun
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError when no devices are found or the disk
    with the given canonical name is not present on the host.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Narrow down to the scsi disk with the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError when the host devices or the disk with
    the given canonical name cannot be found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Narrow down to the scsi disk with the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.

    Returns an empty list when no cache disk ids were requested (and
    get_all_disk_groups is False) or when the host has no vsan disk
    mappings. Raises VMwareObjectRetrievalError when the vsan host config or
    its storage info is missing.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by the canonical name of its (single)
    # cache disk (the ssd attribute of the mapping)
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    # The cache disk must match exactly
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # The capacity (non-ssd) disks must match as a set; compare sorted lists
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Retrieve the cache manager's configuration via a property traversal
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        # A cache manager was passed in; read its configuration directly
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method

    Returns True on success; raises VMwareObjectRetrievalError when the host
    has no cache configuration manager, VMwareApiError/VMwareRuntimeError on
    API faults.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the async configuration task completes
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    # Delegate to the generic object lister for the HostSystem type
    hosts = list_objects(service_instance, vim.HostSystem)
    return hosts
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean

    return
        Resourcepool managed object reference

    Raises VMwareObjectRetrievalError when no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Restrict the search to the datacenter if one was given, otherwise
    # search from the inventory root
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the requested names; previously the always-empty result
        # list was interpolated here, making the error message useless
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    # Delegate to the generic object lister for the ResourcePool type
    pools = list_objects(service_instance, vim.ResourcePool)
    return pools
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    # Delegate to the generic object lister for the Network type
    networks = list_objects(service_instance, vim.Network)
    return networks
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    # Delegate to the generic object lister for the VirtualMachine type
    vms = list_objects(service_instance, vim.VirtualMachine)
    return vms
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    # Delegate to the generic object lister for the Folder type
    folders = list_objects(service_instance, vim.Folder)
    return folders
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    # Delegate to the generic object lister for the DistributedVirtualSwitch type
    switches = list_objects(service_instance, vim.DistributedVirtualSwitch)
    return switches
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    # Delegate to the generic object lister for the VirtualApp type
    vapps = list_objects(service_instance, vim.VirtualApp)
    return vapps
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    # Delegate to the generic object lister for the DistributedVirtualPortgroup type
    portgroups = list_objects(service_instance,
                              vim.dvs.DistributedVirtualPortgroup)
    return portgroups
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.

    Returns the task result on success; on failure re-raises the task's
    error translated into the appropriate salt VMware exception.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # The task is polled once per second; sleep_seconds only controls
        # how often the waiting message below is logged
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole second since start_time
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; raise the stored error and translate
        # known fault types into salt exceptions
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError when no machine with the given name is
    found and VMwareMultipleObjectsError when more than one is found.
    '''
    if datacenter and not parent_ref:
        # Restrict the search to the given datacenter
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Two list elements joined with a space; previously the strings were
        # adjacent literals in one element and concatenated to "thesame"
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # When cloning, place the new VM in the same folder as the source VM
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    # NOTE(review): if none of the branches matched (no base_vm_name, no
    # 'folder' key in placement and no datacenter), folder_object is unbound
    # and this raises UnboundLocalError -- callers appear to always supply a
    # datacenter; confirm before relying on that path.
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied,
    we would like to use the strictest as possible.

    service_instance
        The Service Instance from which to obtain managed object references.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info: cluster, host or resourcepool name

    return
        Tuple of (resource pool object, placement object); the placement
        object is the host or cluster the resource pool belongs to.
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if not placement:
        # Guard: the default placement=None would otherwise raise a
        # TypeError on the membership tests below.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Placement is not defined.')
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter,
                                 host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'The specified host {0} cannot be found.'.format(placement['host']))
        try:
            host_props = get_properties_of_managed_object(
                host_objects[0], properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The host doesn't expose 'resourcePool' directly (it belongs to
            # a cluster); walk up to the cluster's resource pool instead.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if not resourcepools:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
            resourcepool_object = resourcepools[0]['object']
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: this message previously referenced placement['host'],
            # which is not guaranteed to exist in this branch (KeyError).
            raise salt.exceptions.VMwareMultipleObjectsError(
                'Multiple instances are available of the specified '
                'resource pool {0}.'.format(placement['resourcepool']))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' not in res_props:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'The resource pool\'s parent object is not defined')
        placement_object = res_props['parent']
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' not in clus_props:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'The cluster\'s resource pool object is not defined')
        resourcepool_object = clus_props['resourcePool']
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should
        # never be raised.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Placement is not defined.')
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to a kibibyte value.

    unit
        Unit of ``size``: ``GB``, ``MB`` or ``KB`` (case insensitive).
        Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    return
        Dict with the converted integer value under ``size`` and the literal
        string ``KB`` under ``unit``.

    Raises ArgumentValueError when the unit is not recognized.
    '''
    # vCenter needs an integral (long) KB value.
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    try:
        factor = multipliers[unit.lower()]
    except KeyError:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': int(size * factor), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers a virtual machine on or off and waits for the power task to
    complete.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine; either ``on``
        (the default) or ``off``
    '''
    if action not in ('on', 'off'):
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    task_name = 'power on' if action == 'on' else 'power off'
    try:
        if action == 'on':
            task = virtual_machine.PowerOn()
        else:
            task = virtual_machine.PowerOff()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as err:
        raise salt.exceptions.VMwarePowerOnError(
            'An error occurred during power operation, '
            'a file was not found: {0}'.format(err))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from a config spec and waits for the creation
    task to finish.

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Only pass the host when a real HostSystem was supplied.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file;
    on success it returns the vim.VirtualMachine managed object reference.

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object
    '''
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as err:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(err))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Reconfigures the virtual machine with the given config spec and waits
    for the reconfiguration task to complete.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return wait_for_task(reconfig_task, vm_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine and waits for the destroy task to finish.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(destroy_task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters a virtual machine from the inventory (the virtual machine's
    files are left on the datastore; use delete_vm to destroy them).

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before re-raising, consistent with the other API wrappers in
        # this module (was previously missing here).
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_disk_partition_info
|
python
|
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk (a vim.HostDiskPartitionInfo object).

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None, in which case it is
        looked up from the host.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Only SCSI disks are considered; other LUN types can't match the id.
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Partition info is keyed by the device path, not the canonical name.
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
|
Returns all partitions on a disk
host_ref
The reference of the ESXi host containing the disk
disk_id
The canonical name of the disk whose partitions are to be retrieved
storage_system
The ESXi host's storage system. Default is None.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2748-L2790
|
[
"def get_properties_of_managed_object(mo_ref, properties):\n '''\n Returns specific properties of a managed object, retrieved in an\n optimally.\n\n mo_ref\n The managed object reference.\n\n properties\n List of properties of the managed object to retrieve.\n '''\n service_instance = get_service_instance_from_managed_object(mo_ref)\n log.trace('Retrieving name of %s', type(mo_ref).__name__)\n try:\n items = get_mors_with_properties(service_instance,\n type(mo_ref),\n container_ref=mo_ref,\n property_list=['name'],\n local_properties=True)\n mo_name = items[0]['name']\n except vmodl.query.InvalidProperty:\n mo_name = '<unnamed>'\n log.trace('Retrieving properties \\'%s\\' of %s \\'%s\\'',\n properties, type(mo_ref).__name__, mo_name)\n items = get_mors_with_properties(service_instance,\n type(mo_ref),\n container_ref=mo_ref,\n property_list=properties,\n local_properties=True)\n if not items:\n raise salt.exceptions.VMwareApiError(\n 'Properties of managed object \\'{0}\\' weren\\'t '\n 'retrieved'.format(mo_name))\n return items[0]\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n",
"def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):\n '''\n Retrieves the service instance from a managed object.\n\n me_ref\n Reference to a managed object (of type vim.ManagedEntity).\n\n name\n Name of managed object. This field is optional.\n '''\n if not name:\n name = mo_ref.name\n log.trace('[%s] Retrieving service instance from managed object', name)\n si = vim.ServiceInstance('ServiceInstance')\n si._stub = mo_ref._stub\n return si\n",
"def get_storage_system(service_instance, host_ref, hostname=None):\n '''\n Returns a host's storage system\n '''\n\n if not hostname:\n hostname = get_managed_object_name(host_ref)\n\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n path='configManager.storageSystem',\n type=vim.HostSystem,\n skip=False)\n objs = get_mors_with_properties(service_instance,\n vim.HostStorageSystem,\n property_list=['systemFile'],\n container_ref=host_ref,\n traversal_spec=traversal_spec)\n if not objs:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Host\\'s \\'{0}\\' storage system was not retrieved'\n ''.format(hostname))\n log.trace('[%s] Retrieved storage system', hostname)\n return objs[0]['object']\n",
"def _get_partition_info(storage_system, device_path):\n '''\n Returns partition informations for a device path, of type\n vim.HostDiskPartitionInfo\n '''\n try:\n partition_infos = \\\n storage_system.RetrieveDiskPartitionInfo(\n devicePath=[device_path])\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{0}'.format(exc.privilegeId))\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n log.trace('partition_info = %s', partition_infos[0])\n return partition_infos[0]\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary as returned by ``cmd.run_all``, or False if the
             esxcli binary is not available
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    # NOTE(review): the credentials (including the password) are interpolated
    # into a shell command string and only single-quoted; values containing
    # quotes could break the quoting. output_loglevel='quiet' below keeps the
    # password out of the logs.
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting through a vCenter; '-h' selects the target ESXi host.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Connection strategy: a first SmartConnect attempt with the default SSL
    handling; if it fails on certificate verification, retry with an
    unverified context; if that still fails on certificate verification,
    retry once more with a TLSv1 context without verification.
    '''
    log.trace('Retrieving new service instance')
    # token is only populated for the 'sspi' (Kerberos) mechanism.
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Older pyVmomi versions don't accept b64token/mechanism kwargs.
        # NOTE(review): exc.message only exists on Python 2; on Python 3 this
        # raises AttributeError here. Also, if the message does not match,
        # this handler falls through without re-raising and the function
        # fails later on the unbound service_instance — confirm intended.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First fallback: retry without certificate verification.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Second fallback: force TLSv1 without verification.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the connection is cleaned up at interpreter exit.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Look up the spec by name through the customization spec manager.
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # First matching item, or None when nothing in the view has that name.
    matches = (item for item in container.view if item.name == obj_name)
    return next(matches, None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the
    service instance object. Reuses pyVim's cached connection when it is
    still valid and points at the same host.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # GetSi() returns the process-wide cached service instance, if any.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Session expired: drop it and authenticate from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    stub = service_instance._stub
    # stub.host is 'hostname:port'; only the hostname is needed here.
    hostname = stub.host.split(':')[0]
    # Reuse the authenticated session on the new stub.
    # NOTE(review): assumes the session cookie value is the first
    # double-quoted token of stub.cookie — confirm against pyVmomi.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only for log messages. This field is
        optional; when falsy it is looked up from the managed object.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Every managed object obtained through a connection carries that
    # connection's SOAP stub; reuse it for a new ServiceInstance handle.
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Closes the connection to the vCenter server or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Returns True when the connection targets a vCenter Server and False
    when it targets an ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns the 'about' information of the vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None if no switch has that name
    '''
    # Cheap existence check first; only build a container view on a hit.
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    return next((item for item in container.view if item.name == dvs_name),
                None)
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    :return: The base64-encoded token produced by the first GSSAPI step
        that yields output.
    :raises ImportError: If the gssapi library is not available.
    :raises salt.exceptions.CommandExecutionError: If no token could be
        obtained from the security context.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    # Kerberos service principal in the form principal/host@DOMAIN
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # Advance the security context. NOTE(review): in_token is never
        # reassigned in this loop, so only the initial step (driven with
        # no input token) is ever performed before returning or raising.
        out_token = ctx.step(in_token)
        if out_token:
            # gssapi returns bytes on PY2 and text on PY3; normalize to
            # bytes before base64-encoding.
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            # No output token produced and nothing received from the server
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only HostAgent connections (i.e. talking directly to an ESXi host)
    # expose the hardware details harvested below; other connection types
    # return an empty dict.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the single HostSystem of this host agent
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize divided down twice — presumably bytes -> MiB; confirm
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Collect per-vnic addressing info, keyed by device name
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host + '.' + domain, or just host when domain is empty
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # NOTE(review): the container view is only dereferenced here, not
        # explicitly Destroy()ed on the server.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the full inventory (service content) of a Service Instance.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Return the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    :raises salt.exceptions.VMwareApiError: On permission or generic API
        faults raised by the server.
    :raises salt.exceptions.VMwareRuntimeError: On vmodl runtime faults.
    '''
    try:
        log.trace('Retrieving root folder')
        root_folder = service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return root_folder
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.

    :raises salt.exceptions.VMwareApiError: On permission or generic API
        faults raised while creating the view or retrieving contents.
    :raises salt.exceptions.VMwareRuntimeError: On vmodl runtime faults.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    # local_traversal_spec records whether *we* created a container view
    # below, so we know to destroy it before returning.
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Fetch every managed object of the requested type along with the
    # property used for matching.
    candidates = get_mors_with_properties(service_instance, object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # Also accept a match against the object's moid (stripped of quotes)
        candidate_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or \
                property_value == candidate_id:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specigying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.

    :return: List of dicts, one per object; each dict maps property name
        to value and also carries the managed object itself under the
        ``object`` key.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # The connection can drop with a bad status line; retry once
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        # Retry once on a broken pipe; re-raise any other IOError
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)

    # Flatten each object's propSet into a plain dict, keeping the managed
    # object reference itself under the 'object' key.
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimally.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    :raises salt.exceptions.VMwareApiError: If no properties could be
        retrieved for the managed object.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First fetch just the 'name' property so error messages and trace
    # logging can refer to the object by name.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        # Not all managed object types expose a 'name' property
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Return the name of a managed object, or None when no name was found.

    mo_ref
        The managed object reference.
    '''
    mo_props = get_properties_of_managed_object(mo_ref, ['name'])
    return mo_props.get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new network adapter device object for the given type name.

    adapter_type
        The adapter type name ('vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or
        'e1000e') for which to build the adapter object.

    :raises ValueError: When the type name is not recognized.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Return the type name of a network adapter device object.

    adapter_object
        The adapter device object whose type name to determine.

    :raises ValueError: When the object matches no known adapter class.
    '''
    # The original isinstance ordering is preserved: more specific classes
    # are tested before the plain vmxnet/e1000 variants, since the device
    # classes may be related by inheritance.
    ordered_checks = [
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    ]
    for adapter_class, type_name in ordered_checks:
        if isinstance(adapter_object, adapter_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Return the distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Retrieve the network folder of a datacenter.

    dc_ref
        The datacenter reference whose network folder to return.

    :raises salt.exceptions.VMwareObjectRetrievalError: When the network
        folder could not be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    folder_traversal = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=folder_traversal)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Create a distributed virtual switch (DVS) in a datacenter and wait for
    the creation task to complete.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        # Fill in a minimal config spec carrying just the switch name
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        create_task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Block until vCenter reports the task finished
    wait_for_task(create_task, dvs_name, six.text_type(create_task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Apply a config spec to a distributed virtual switch and wait for the
    reconfiguration task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        reconf_task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(reconf_task, dvs_name, six.text_type(reconf_task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enable or disable network I/O control (NIOC) on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual porgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the dvss to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    :raises salt.exceptions.ArgumentValueError: When parent_ref is neither
        a datacenter nor a distributed virtual switch.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    # The traversal path depends on the parent type: a datacenter reaches
    # portgroups through networkFolder -> childEntity, while a DVS exposes
    # them directly through its 'portgroup' property.
    if isinstance(parent_ref, vim.Datacenter):
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Keep only the requested portgroups (or all when get_all_portgroups)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Return the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference

    :raises salt.exceptions.VMwareObjectRetrievalError: When no uplink
        portgroup is found on the switch.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup carries the SYSTEM/DVS.UPLINKPG tag
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            return entry['object']
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
def create_dvportgroup(dvs_ref, spec):
    '''
    Create a distributed virtual portgroup on a distributed virtual switch
    (dvs) and wait for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        create_task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(create_task, dvs_name, six.text_type(create_task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log-message typo: 'portgrouo' -> 'portgroup'
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Remove a distributed virtual portgroup and wait for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        destroy_task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(destroy_task, pg_name, six.text_type(destroy_task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Return the standard-switch networks under a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.

    :raises salt.exceptions.ArgumentValueError: When parent_ref is not a
        datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to reach networks
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_spec])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    # Docstring previously documented a nonexistent 'object_type' parameter;
    # the actual parameter is 'vim_object'.
    if properties is None:
        properties = ['name']

    # get_mors_with_properties returns one property dict per object; only
    # the 'name' value is exposed to the caller.
    return [item['name']
            for item in get_mors_with_properties(service_instance,
                                                 vim_object,
                                                 properties)]
def get_license_manager(service_instance):
    '''
    Return the license manager of a Service Instance.

    service_instance
        The Service Instance Object from which to obrain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        return service_instance.content.licenseManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_license_assignment_manager(service_instance):
    '''
    Return the license assignment manager of a Service Instance.

    service_instance
        The Service Instance Object from which to obrain the license manager.

    :raises salt.exceptions.VMwareObjectRetrievalError: When the manager
        could not be retrieved.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    if not assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Return the licenses registered on a specific instance.

    service_instance
        The Service Instance Object from which to obrain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Add a license to the license manager.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # Attach the human-readable description as the vSphere-client label
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        added_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return added_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    :raises salt.exceptions.ArgumentValueError: When entity_name is not
        given (it is required even though the signature defaults it to
        None).
    :raises salt.exceptions.VMwareObjectRetrievalError: When the vCenter
        query unexpectedly returns multiple assignments, or the returned
        assignment belongs to a different vCenter.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # entity_name is guaranteed truthy here (checked above), so the
        # vCenter branch always enables the name cross-check below.
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        # For a concrete entity (host, cluster, ...) query by its moid
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
                license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # A vCenter (uuid) query should yield exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        # Guard against the uuid resolving to a different vCenter
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')

    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # No entity passed: the license is assigned to the vCenter itself,
        # identified by its instance uuid
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before re-raising, consistent with the other API wrappers
            # in this module (was missing here)
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Lists the datacenters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns datacenter managed objects in a vCenter, optionally filtered
    by name.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    found = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            found.append(entry['object'])
    return found
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns the vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if matches:
        return matches[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    inventory_root = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        new_dc_ref = inventory_root.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return new_dc_ref
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref,
                                                                name=dc_name)
    # Traverse datacenter -> hostFolder -> childEntity to reach the clusters
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    matches = [entry['object'] for entry in
               get_mors_with_properties(service_instance,
                                        vim.ClusterComputeResource,
                                        container_ref=dc_ref,
                                        property_list=['name'],
                                        traversal_spec=traversal_spec)
               if entry['name'] == cluster]
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    datacenter_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, datacenter_name)
    try:
        # Clusters are created in the datacenter's host folder
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing configuration
        reconfig_task = cluster_ref.ReconfigureComputeResource_Task(
            cluster_spec, modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(reconfig_task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Lists the clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Lists the datastore clusters (storage pods) associated with a given
    service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Lists the datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a mapping of datastore name to basic datastore information
    (name, type, url, capacity, free, used, usage, hosts).

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {ds: list_datastore_full(service_instance, ds)
            for ds in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Sizes are reported in MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against ZeroDivisionError: an inaccessible/unmounted datastore
    # can report a capacity of 0
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key's repr looks like "'vim.HostSystem:host-123'"; extract the
        # moid after the colon to resolve the host object
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the managed object reference, or None if no object matches.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Destroy the view so each call doesn't leak a server-side
        # ContainerView object on the vCenter/ESXi host
        container.Destroy()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object

    Returns the managed object reference, or None if no object matches.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Destroy the view so each call doesn't leak a server-side
        # ContainerView object on the vCenter/ESXi host
        container.Destroy()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Search each datastore at '[<datastore>] <directory>'
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            # A datastore that doesn't contain the directory is skipped
            # (best-effort aggregation over all requested datastores)
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        # Backing-disk filtering relies on the host's storage system, so it
        # is only supported when the reference is a host
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Merge the disk-derived datastore names into the name filter
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Pick a traversal spec appropriate for the reference type; each one
    # walks from the reference to its visible datastores
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    current_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", current_name,
              new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns the HostStorageSystem of an ESXi host.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return entries[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information (vim.HostDiskPartitionInfo) for a device
    path.
    '''
    try:
        infos = storage_system.RetrieveDiskPartitionInfo(
            devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Only a single device path was queried, so the first (and only) entry
    # is the one of interest
    log.trace('partition_info = %s', infos[0])
    return infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Bug fix: the message previously used a str.format placeholder ('{0}')
    # with logging's %-style lazy arguments, so the value was never
    # interpolated into the message
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDisk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved if not provided.
        Default is None.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    # Compute a partition spec that turns the disk's trailing free space
    # into a single vmfs partition
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    # Build the datastore creation spec over the newly computed partition
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
                host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns the HostDatastoreSystem of an ESXi host.

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                      vim.HostDatastoreSystem,
                                      property_list=['datastore'],
                                      container_ref=host_ref,
                                      traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return entries[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore via the datastore system of one of its attached
    hosts.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    attached_hosts = ds_props.get('host')
    if not attached_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host can perform the removal; use the first one
    hostname = get_managed_object_name(attached_hosts[0].key)
    host_ds_system = get_host_datastore_system(attached_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    if not datacenter_name:
        # No datacenter given: search from the inventory root folder
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # 'parent' is needed below to check cluster membership
            properties.append('parent')
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for host in hosts:
        # Keep a host if it belongs to the requested cluster (when set) and
        # either all hosts were requested or its name matches the filter
        if cluster_name:
            parent = host['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host['name'] in host_names:
            filtered_hosts.append(host['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    # Each lun can be reached through multiple paths; each path maps its
    # scsi address to the lun's key
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns all vim.ScsiLun objects on an ESXi host (empty list if there are
    none).

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        service_instance = get_service_instance_from_managed_object(
            host_ref, name=hostname)
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    if not storage_system:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if not scsi_luns:
        log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
        return []
    log.trace('Retrieved scsi luns in host \'%s\': %s',
              hostname, [lun.canonicalName for lun in scsi_luns])
    return scsi_luns
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref,
                                                                name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # scsi address -> lun key
    scsi_addr_to_lun_key = _get_scsi_address_to_lun_key_map(
        service_instance, host_ref, storage_system, hostname)
    # lun key -> lun object
    lun_by_key = {lun.key: lun for lun in
                  get_all_luns(host_ref, storage_system, hostname)}
    # Compose the two maps: scsi address -> lun object
    return {scsi_addr: lun_by_key[lun_key]
            for scsi_addr, lun_key in six.iteritems(scsi_addr_to_lun_key)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # No filter and not retrieving everything: nothing to return
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    # A disk matches if everything was requested, or its canonical name was
    # requested, or its key was derived from a requested scsi address
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk
    in a vcenter filtered by their names and/or datacenter, cluster membership
    service_instance
        The Service Instance Object from which to obtain all information
    host_ref
        The reference of the ESXi host containing the disk
    disk_id
        The canonical name of the disk whose partitions are to be removed
    hostname
        The ESXi hostname. Default is None.
    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # Enumerate the host's SCSI LUNs so the device path can be resolved
    # from the disk's canonical name.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Only disk-type LUNs matching the requested canonical name qualify.
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.
    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # No cache disk filter and not retrieving everything: nothing to do.
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (single) cache disk's canonical name.
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that a disk group's cache disk and capacity disks match the
    expected canonical names; raises ArgumentValueError when a check fails
    and returns True otherwise.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, other wise returns None
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # No manager given: traverse from the host to its cache config
        # manager and fetch the cache info in one property-collector call.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # NOTE: only the first configured datastore is returned
        # (see the module TODO about multi-datastore host caches).
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cahe of the specified host
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    datastore_ref
        The vim.Datastore opject representing the datastore the host cache will
        be configured on.
    swap_size_MiB
        The size in Mibibytes of the swap.
    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method

    Returns True on success; raises VMwareApiError/VMwareRuntimeError on
    vSphere faults.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the server-side task completes (raises on task failure).
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    # Delegate the inventory walk to the generic object lister.
    object_type = vim.HostSystem
    return list_objects(service_instance, object_type)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects
    service_instance
        The service instance object to query the vCenter
    resource_pool_names
        Resource pool names
    datacenter_name
        Name of the datacenter where the resource pool is available
    get_all_resource_pools
        Boolean; when True every resource pool under the container is returned
    return
        List of vim.ResourcePool managed object references
    Raises VMwareObjectRetrievalError when no pool matched.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Scope the search to the datacenter when given, otherwise the root folder.
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Bug fix: the message used to format ``selected_pools`` which is
        # always [] here; report the requested names instead.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(resource_pool_names,
                                                            get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    # Delegate the inventory walk to the generic object lister.
    object_type = vim.ResourcePool
    return list_objects(service_instance, object_type)
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    # Delegate the inventory walk to the generic object lister.
    object_type = vim.Network
    return list_objects(service_instance, object_type)
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    # Delegate the inventory walk to the generic object lister.
    object_type = vim.VirtualMachine
    return list_objects(service_instance, object_type)
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    # Delegate the inventory walk to the generic object lister.
    object_type = vim.Folder
    return list_objects(service_instance, object_type)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    # Delegate the inventory walk to the generic object lister.
    object_type = vim.DistributedVirtualSwitch
    return list_objects(service_instance, object_type)
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    # Delegate the inventory walk to the generic object lister.
    object_type = vim.VirtualApp
    return list_objects(service_instance, object_type)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    # Delegate the inventory walk to the generic object lister.
    object_type = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, object_type)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.
    task
        The task to wait for.
    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.
    task_type
        The type of task being performed. Useful information for debugging purposes.
    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.
    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.

    Returns the task result on success; re-raises the task's error as the
    matching salt.exceptions type on failure.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Reading task.info can itself fault; translate faults the same way the
    # polling loop below does.
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll roughly once per second; a progress message is emitted every
    # ``sleep_seconds`` iterations.
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary relative to start_time
        # so the counter tracks wall-clock seconds.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state
        # Raise the stored error and translate it into the corresponding
        # salt exception type.
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.
    service_instance
        Service instance object to access vCenter
    name
        Name of the virtual machine.
    datacenter
        Datacenter name
    vm_properties
        List of vm properties.
    traversal_spec
        Traversal Spec object(s) for searching.
    parent_ref
        Container Reference object for searching under a given object.
    Raises VMwareObjectRetrievalError when no VM matches and
    VMwareMultipleObjectsError when the name is ambiguous.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set covering common hardware/storage/guest info.
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Bug fix: the message was built from two adjacent string literals
        # in a one-element list, which rendered as '...with thesame name...'.
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the same name, '
            'please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object
    service_instance
        Service instance object
    datacenter
        Name of the datacenter
    placement
        Placement dictionary
    base_vm_name
        Existing virtual machine name (for cloning)
    Raises ArgumentValueError when neither a base VM, a folder placement,
    nor a datacenter is supplied.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Clone case: place the new VM next to the base VM.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Bug fix: previously no branch assigned folder_object here and the
        # return statement raised UnboundLocalError.
        raise salt.exceptions.ArgumentValueError(
            'Unable to retrieve folder: no base VM name, folder placement '
            'or datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.
    datacenter
        Name of the datacenter
    placement
        Dictionary with the placement info, cluster, host resource pool name
    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose resourcePool directly; clustered hosts
            # require traversing parent -> cluster -> resourcePool.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Bug fix: the message referenced placement['host'], which is the
            # wrong name and may not even exist in this branch (KeyError).
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a dict with the
    size as an integer number of KB.
    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB
    size
        Number which represents the size
    '''
    normalized_unit = unit.lower()
    if normalized_unit == 'kb':
        target_size = int(size)
    elif normalized_unit == 'mb':
        target_size = int(size * 1024)
    elif normalized_unit == 'gb':
        # vCenter needs long value
        target_size = int(size * 1024 * 1024)
    else:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': target_size, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.
    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine
    action
        Operation option to power on/off the machine
    '''
    # Resolve the requested operation up front so a single fault-translation
    # block covers both power actions.
    if action == 'on':
        power_op = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_op = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_op()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec
    vm_name
        Virtual machine name to be created
    vm_config_spec
        Virtual Machine Config Spec object
    folder_object
        vm Folder managed object reference
    resourcepool_object
        Resource pool object where the machine will be created
    host_object
        Host object where the machine will ne placed (optional)
    return
        Virtual Machine managed object reference
    '''
    try:
        # Only pass host= when an actual HostSystem was supplied.
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # VM creation can be slow: poll every 10s and log progress at info level.
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference
    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object
    name
        Name of the virtual machine
    vmx_path:
        Full path to the vmx file, datastore name should be included
    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object
    host_object
        Placement host of the virtual machine, vim.HostSystem object
    '''
    try:
        # host is optional: the resource pool alone is enough for placement.
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # A missing vmx file surfaces from the task as FileNotFound;
        # re-raise as a registration-specific error.
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object
    vm_ref
        Virtual machine managed object reference
    vm_config_spec
        Virtual machine config spec object to update

    Returns the result of the reconfigure task.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfigure finishes; the task result is returned.
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine
    vm_ref
        Managed object reference of a virtual machine object

    Deletes the VM and its files from disk (contrast with unregister_vm,
    which only removes the VM from the inventory).
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory without deleting
    its files (contrast with delete_vm, which destroys the VM on disk)
    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Bug fix: log message used to say 'Destroying vm' (copied from delete_vm).
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Consistency fix: log the fault before translating, like every
        # sibling function in this module does.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
erase_disk_partitions
|
python
|
def erase_disk_partitions(service_instance, host_ref, disk_id,
hostname=None, storage_system=None):
'''
Erases all partitions on a disk
in a vcenter filtered by their names and/or datacenter, cluster membership
service_instance
The Service Instance Object from which to obtain all information
host_ref
The reference of the ESXi host containing the disk
disk_id
The canonical name of the disk whose partitions are to be removed
hostname
The ESXi hostname. Default is None.
storage_system
The ESXi host's storage system. Default is None.
'''
if not hostname:
hostname = get_managed_object_name(host_ref)
if not storage_system:
storage_system = get_storage_system(service_instance, host_ref,
hostname)
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
path='configManager.storageSystem',
type=vim.HostSystem,
skip=False)
results = get_mors_with_properties(service_instance,
vim.HostStorageSystem,
['storageDeviceInfo.scsiLun'],
container_ref=host_ref,
traversal_spec=traversal_spec)
if not results:
raise salt.exceptions.VMwareObjectRetrievalError(
'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
log.trace(
'[%s] Retrieved %s devices: %s',
hostname,
len(results[0].get('storageDeviceInfo.scsiLun', [])),
', '.join([l.canonicalName for l in
results[0].get('storageDeviceInfo.scsiLun', [])])
)
disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
if isinstance(l, vim.HostScsiDisk) and
l.canonicalName == disk_id]
if not disks:
raise salt.exceptions.VMwareObjectRetrievalError(
'Disk \'{0}\' was not found in host \'{1}\''
''.format(disk_id, hostname))
log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
# Erase the partitions by setting an empty partition spec
try:
storage_system.UpdateDiskPartitions(disks[0].devicePath,
vim.HostDiskPartitionSpec())
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
|
Erases all partitions on a disk
in a vcenter filtered by their names and/or datacenter, cluster membership
service_instance
The Service Instance Object from which to obtain all information
host_ref
The reference of the ESXi host containing the disk
disk_id
The canonical name of the disk whose partitions are to be removed
hostname
The ESXi hostname. Default is None.
storage_system
The ESXi host's storage system. Default is None.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2793-L2864
|
[
"def get_mors_with_properties(service_instance, object_type, property_list=None,\n container_ref=None, traversal_spec=None,\n local_properties=False):\n '''\n Returns a list containing properties and managed object references for the managed object.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n\n object_type\n The type of content for which to obtain managed object references.\n\n property_list\n An optional list of object properties used to return even more filtered managed object reference results.\n\n container_ref\n An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,\n ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory\n rootFolder.\n\n traversal_spec\n An optional TraversalSpec to be used instead of the standard\n ``Traverse All`` spec\n\n local_properties\n Flag specigying whether the properties to be retrieved are local to the\n container. If that is the case, the traversal spec needs to be None.\n '''\n # Get all the content\n content_args = [service_instance, object_type]\n content_kwargs = {'property_list': property_list,\n 'container_ref': container_ref,\n 'traversal_spec': traversal_spec,\n 'local_properties': local_properties}\n try:\n content = get_content(*content_args, **content_kwargs)\n except BadStatusLine:\n content = get_content(*content_args, **content_kwargs)\n except IOError as exc:\n if exc.errno != errno.EPIPE:\n raise exc\n content = get_content(*content_args, **content_kwargs)\n\n object_list = []\n for obj in content:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n properties['object'] = obj.obj\n object_list.append(properties)\n log.trace('Retrieved %s objects', len(object_list))\n return object_list\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n",
"def get_storage_system(service_instance, host_ref, hostname=None):\n '''\n Returns a host's storage system\n '''\n\n if not hostname:\n hostname = get_managed_object_name(host_ref)\n\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n path='configManager.storageSystem',\n type=vim.HostSystem,\n skip=False)\n objs = get_mors_with_properties(service_instance,\n vim.HostStorageSystem,\n property_list=['systemFile'],\n container_ref=host_ref,\n traversal_spec=traversal_spec)\n if not objs:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Host\\'s \\'{0}\\' storage system was not retrieved'\n ''.format(hostname))\n log.trace('[%s] Retrieved storage system', hostname)\n return objs[0]['object']\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Declare this module loadable only when the pyVmomi third-party library
    could be imported.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command via the locally
    installed ``esxcli`` binary and return the raw ``cmd.run_all`` result.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary (the ``run_all`` result), or False when the
             ``esxcli`` binary is not on the PATH.
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # 'host' is a vCenter server; '-h' targets the specific ESXi host
        # managed by that vCenter.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    # output_loglevel='quiet' keeps the command line (which embeds the
    # password) out of the logs.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        Location of the vCenter server or ESX/ESXi host.

    username
        User to authenticate as. Mandatory for the ``userpass`` mechanism.

    password
        Password for ``username``. Mandatory for the ``userpass`` mechanism.

    protocol
        Connection protocol (e.g. ``https``).

    port
        TCP port to connect to.

    mechanism
        Login mechanism: ``userpass`` or ``sspi``.

    principal
        Kerberos service principal. Mandatory for the ``sspi`` mechanism.

    domain
        Kerberos user domain. Mandatory for the ``sspi`` mechanism.

    Raises CommandExecutionError on invalid/missing mechanism parameters and
    VMwareConnectionError when the connection cannot be established.
    '''
    log.trace('Retrieving new service instance')
    token = None
    # Validate the parameters the chosen mechanism requires; for SSPI also
    # obtain a GSSAPI token to hand to SmartConnect.
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Older pyVmomi releases don't accept the b64token/mechanism keyword
        # arguments; surface a helpful hint instead of a cryptic TypeError.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First fallback: retry with certificate verification disabled
            # when the failure was an SSL certificate verification error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Second fallback: an explicit TLSv1 context with
                # verification turned off entirely.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is torn down when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Fetch the named customization spec from the vCenter customization spec
    manager, for the purposes of customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Return the managed object of the given type whose name matches
    ``obj_name``, or None when no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    matches = (entity for entity in container.view if entity.name == obj_name)
    return next(matches, None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # Try to reuse pyVim's cached service instance before connecting anew.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Cached session expired on the server side; reconnect from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a SOAP stub that points to a different path on the same host,
    created from an existing connection and reusing its session cookie.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    stub = service_instance._stub
    # stub.host is of the form '<hostname>:<port>'; keep the hostname only.
    hostname = stub.host.split(':')[0]
    # The session cookie is the double-quoted value inside the cookie string.
    session_cookie = stub.cookie.split('"')[1]
    # Propagate the existing vCenter session into the new stub's requests.
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Build a vim.ServiceInstance that shares the SOAP stub of an existing
    managed object reference, so further API calls reuse that connection.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    service_instance = vim.ServiceInstance('ServiceInstance')
    service_instance._stub = mo_ref._stub
    return service_instance
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Return True when the service instance is connected to a vCenter Server
    and False when it is connected directly to an ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        endpoint_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', endpoint_type)
    if endpoint_type == 'HostAgent':
        return False
    if endpoint_type == 'VirtualCenter':
        return True
    # Anything else is an endpoint this module does not know how to handle.
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(endpoint_type))
def get_service_info(service_instance):
    '''
    Return the 'about' information of the vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        about_info = service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return about_info
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when the name is unknown
    '''
    # Only walk the inventory when the requested name actually exists.
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    return next((dvs for dvs in container.view if dvs.name == dvs_name), None)
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Returns the base64-encoded token produced by the first successful
    handshake step; raises CommandExecutionError otherwise.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # step() performs one leg of the GSSAPI handshake; the first
        # out_token produced is returned base64-encoded.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): in_token is never reassigned inside the loop, so this
        # branch raises on the first iteration that yields no out_token.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only a direct ESXi (HostAgent) connection exposes a single host whose
    # hardware can be mapped onto minion grains.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # System/BIOS/CPU/product information from the first host found.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            # Network interface grains: one entry per virtual NIC (vmk).
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is 'host.domain', or just 'host' when no domain is set.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the reference to the container view when done.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the retrieved content (inventory) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view created above (only when we created one here)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # Also compare against the string form of the managed object
        # reference itself, with surrounding quotes stripped.
        obj_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == obj_id:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # The server dropped the connection mid-response; retry once.
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        # Retry once on a broken pipe; re-raise any other I/O error.
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)

    # Flatten each result's propSet into a plain dict, keeping the raw
    # managed object reference under the 'object' key.
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises VMwareApiError when the properties could not be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # Fetch the object's name first; it is only used to produce nicer
    # log and error messages below.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.

    If the name wasn't found, it returns None.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device of the requested type.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or 'e1000e'.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    adapter_class = adapter_classes.get(adapter_type)
    if adapter_class is None:
        raise ValueError('An unknown network adapter object type name.')
    return adapter_class()
def get_network_adapter_object_type(adapter_object):
    '''
    Return the string type of a virtual network adapter device.

    adapter_object
        The adapter object from which to obtain the network adapter type.
    '''
    # Keep this check order: vmxnet2/vmxnet3 are tested before vmxnet, and
    # e1000e before e1000, matching the original isinstance sequence.
    type_checks = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for adapter_class, type_name in type_checks:
        if isinstance(adapter_object, adapter_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # DVSs hang off the datacenter's network folder: traverse into that
    # folder and then over its children instead of walking the whole tree.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    results = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualSwitch,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    wanted = []
    for result in results:
        if get_all_dvss or (dvs_names and result['name'] in dvs_names):
            wanted.append(result['object'])
    return wanted
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    # Jump straight from the datacenter to its 'networkFolder' property.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.Datacenter,
        path='networkFolder',
        skip=False)
    folders = get_mors_with_properties(
        get_service_instance_from_managed_object(dc_ref),
        vim.Folder,
        container_ref=dc_ref,
        property_list=['name'],
        traversal_spec=traversal_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the
        name is built.

    NOTE(review): contrary to the original docstring, no DVS reference is
    returned; callers that need the new switch must look it up afterwards
    (e.g. via get_dvss).
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the creation task finishes (or raises).
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Reconfigures a distributed virtual switch and waits for the task to
    complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        reconfig_task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(
                exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(reconfig_task, dvs_name,
                  six.text_type(reconfig_task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether network I/O control (NIOC) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        # Synchronous call; no task is returned
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Reach portgroups through the datacenter's network folder children
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # Parent is a distributed virtual switch; use its portgroup property
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    si = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(si,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    si = get_service_instance_from_managed_object(dvs_ref)
    uplink_pgs = []
    for entry in get_mors_with_properties(si,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        # The uplink portgroup is marked with the SYSTEM/DVS.UPLINKPG tag
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Reconfigures a distributed virtual portgroup and waits for the task to
    complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed 'portgrouo' typo in the trace message
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Destroys a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        destroy_task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(
                exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, pg_name, six.text_type(destroy_task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    si = get_service_instance_from_managed_object(parent_ref)
    # Networks hang off the children of the datacenter's network folder
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    networks = []
    for entry in get_mors_with_properties(si,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Build the name list directly instead of appending in a loop
    return [item['name'] for item in
            get_mors_with_properties(service_instance, vim_object, properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager (vim.LicenseManager).

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager
    (vim.LicenseAssignmentManager).

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises a VMwareObjectRetrievalError if the manager is not set on the
    license manager.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license and returns the resulting vim.LicenseManagerLicenseInfo.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label shown by the vSphere client
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required (an ArgumentValueError is
        raised when missing).
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # vCenter case: query by the instance UUID and later cross-check the
        # returned display name against entity_name
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        # Arbitrary entity (e.g. cluster, host): query by its moid
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter UUID query is expected to yield exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        # NOTE(review): assignments[0] raises IndexError when the query
        # returned nothing — presumably never happens for a valid vCenter;
        # TODO confirm
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license info.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before translating, consistent with the other API wrappers
            # in this module (was silently re-raised before)
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns the list of datacenter names visible on a given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    # Keep only the datacenters matching the filter (or all of them)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns the vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder and returns its reference.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster (vim.ClusterComputeResource) in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The name of the cluster to be retrieved

    Raises a VMwareObjectRetrievalError when no cluster with that name
    exists in the datacenter.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the children of the datacenter's host folder
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx). Required.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        # Synchronous; the created cluster reference is not returned
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Reconfigures a cluster and waits for the task to complete.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx). Required.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns the list of cluster names visible on a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns the list of datastore cluster names visible on a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns the list of datastore names visible on a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dict of datastores associated with a given service instance.
    Each entry maps a datastore name to its basic information:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts.
    Capacity/free/used are expressed in MiB; usage is a percentage.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises a VMwareObjectRetrievalError when the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against ZeroDivisionError for inaccessible/unmounted datastores
    # that report a capacity of 0
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies as "'vim.HostSystem:host-123'"; extract the moid
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.
    Returns None when no object matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Destroy the view to avoid leaking server-side view objects
        # (previously the view was never destroyed)
        container.DestroyView()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id.
    Returns None when no object matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Destroy the view to avoid leaking server-side view objects
        # (previously the view was never destroyed)
        container.DestroyView()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Datastores without the directory are silently skipped
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
    if backing_disk_ids and not isinstance(reference, vim.HostSystem):
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\' when backing disk filter '
            'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        # Translate the backing disk ids into datastore names via the host's
        # mounted VMFS volumes, then merge them into the name filter
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        # Nothing left to match against; avoid a pointless query
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    current_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'",
              current_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {}'.format(
                exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used for the property query.

    host_ref
        The vim.HostSystem reference whose storage system is retrieved.

    hostname
        Host name used in logging/errors; retrieved from host_ref when
        not provided. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Follow the host's configManager.storageSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo.

    storage_system
        The vim.HostStorageSystem used to query the partition layout.

    device_path
        The device path whose partition info is retrieved.
    '''
    try:
        # The API accepts a list of device paths; only one is queried here
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec).

    storage_system
        The vim.HostStorageSystem used to compute the partition layout.

    device_path
        Device path of the disk on which the partition is added.

    partition_info
        The current vim.HostDiskPartitionInfo of the disk.

    Raises VMwareObjectNotFoundError if no free partition exists, and
    VMwareNotFoundError if the newly computed partition can't be identified.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We only support adding a partition at the end of the disk
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use %s placeholders: logging's lazy interpolation is printf-style, so
    # the previous '{0}' placeholder was never substituted
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore on a scsi disk, using a new partition that fills
    the remainder of the disk. Returns the created vim.Datastore reference.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDisk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved from ``host_ref`` if not
        provided.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a new partition taking up the remainder of the disk
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional; retrieved from
        ``host_ref`` if not provided.

    Raises VMwareObjectRetrievalError if the datastore system could not be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host object to its configManager.datastoreSystem
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The removal is performed through the datastore
    system of the first host the datastore is attached to.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises VMwareApiError if the datastore has no attached hosts.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host can remove the datastore; use the first one
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Without a datacenter, search the whole inventory from the root
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # 'parent' is needed to test each host's cluster membership
            properties.append('parent')
    # Retrieve every host under the starting point, then filter locally
    all_hosts = get_mors_with_properties(service_instance,
                                         vim.HostSystem,
                                         container_ref=start_point,
                                         property_list=properties)
    log.trace('Retrieved hosts: %s', [entry['name'] for entry in all_hosts])
    selected_hosts = []
    for entry in all_hosts:
        if cluster_name:
            # Skip hosts that aren't members of the requested cluster
            parent = entry['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or entry['name'] in host_names:
            selected_hosts.append(entry['object'])
    return selected_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.

        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None; retrieved from
        ``host_ref`` if not provided.

    hostname
        Name of the host. Default is None; retrieved from ``host_ref`` if
        not provided.

    Raises VMwareObjectRetrievalError if the storage device, multipath or
    lun information could not be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects on a host; returns an
    empty list if the host reports no scsi luns.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None; retrieved from
        ``host_ref`` if not provided.

    hostname
        Name of the host. This argument is optional.

    Raises VMwareObjectRetrievalError if the storage system or the storage
    device info could not be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(
        host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # Map scsi address -> lun key, then resolve each key to its lun object
    scsi_addr_to_lun_key = _get_scsi_address_to_lun_key_map(
        service_instance, host_ref, storage_system, hostname)
    lun_by_key = {lun.key: lun
                  for lun in get_all_luns(host_ref, storage_system, hostname)}
    scsi_addr_to_lun = {}
    for scsi_addr, lun_key in six.iteritems(scsi_addr_to_lun_key):
        scsi_addr_to_lun[scsi_addr] = lun_by_key[lun_key]
    return scsi_addr_to_lun
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # No filters given and not retrieving everything: nothing to do
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk, as a vim.HostDiskPartitionInfo object.

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None; retrieved from
        ``host_ref`` if not provided.

    Raises VMwareObjectRetrievalError if no devices are found on the host or
    if the disk is not among them.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the requested disk among the host's scsi luns
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disk groups
    in a ESXi host, filtered by the canonical names of their cache disks.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.

    Raises VMwareObjectRetrievalError if the host's vsan config or storage
    info could not be retrieved.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # Each mapping's ssd attribute is its (unique) cache disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Validates that a disk group contains exactly the expected cache disk and
    capacity disks; raises ArgumentValueError when either check fails and
    returns True otherwise.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # Capacity disks are order-insensitive: compare sorted id lists
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the host cache configuration info if the host cache is configured
    on the specified host, otherwise returns None.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Traverse from the host to its cacheConfigurationManager
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first cache configuration is supported (see TODO above)
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host. Returns True when the
    configuration task completes successfully.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache
        will be configured on.

    swap_size_MiB
        The size in mebibytes (MiB) of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the configuration task finishes
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Lists the ESXi hosts known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    host_type = vim.HostSystem
    return list_objects(service_instance, host_type)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; when True, return all resource pools in the container
        regardless of ``resource_pool_names``

    return
        List of vim.ResourcePool managed object references

    Raises VMwareObjectRetrievalError when no matching pools are found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the requested names; the previous code formatted the
        # (always empty) result list, so the message always showed names=[]
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Lists the resource pools known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    pool_type = vim.ResourcePool
    return list_objects(service_instance, pool_type)
def list_networks(service_instance):
    '''
    Lists the networks known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    network_type = vim.Network
    return list_objects(service_instance, network_type)
def list_vms(service_instance):
    '''
    Lists the virtual machines known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vm_type = vim.VirtualMachine
    return list_objects(service_instance, vm_type)
def list_folders(service_instance):
    '''
    Lists the folders known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    folder_type = vim.Folder
    return list_objects(service_instance, folder_type)
def list_dvs(service_instance):
    '''
    Lists the distributed virtual switches known to the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        switches.
    '''
    dvs_type = vim.DistributedVirtualSwitch
    return list_objects(service_instance, dvs_type)
def list_vapps(service_instance):
    '''
    Lists the vApps known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vapp_type = vim.VirtualApp
    return list_objects(service_instance, vapp_type)
def list_portgroups(service_instance):
    '''
    Lists the distributed virtual portgroups known to the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        portgroups.
    '''
    portgroup_type = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, portgroup_type)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed and returns the task result on success.
    On failure the underlying task fault is re-raised and translated into the
    corresponding salt VMware exception.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll the task until it leaves the running/queued states
    while task_info.state == 'running' or task_info.state == 'queued':
        # Log a progress message every sleep_seconds seconds
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole second since start_time, so each poll
        # happens on a roughly one-second boundary
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the recorded fault so it can be
        # mapped to the appropriate salt exception type below
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError if no VM matches ``name`` and
    VMwareMultipleObjectsError if more than one VM matches.
    '''
    if datacenter and not parent_ref:
        # Restrict the search to the named datacenter
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # The two message fragments were previously a single implicitly
        # concatenated string, producing 'with thesame name'; keep them as
        # separate list items so ' '.join inserts the space
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object. The folder is resolved, in order of precedence,
    from the base VM's parent, from ``placement['folder']``, or from the
    datacenter's vmFolder.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary; the ``folder`` key, if present, names the folder

    base_vm_name
        Existing virtual machine name (for cloning)
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Use the parent folder of the VM being cloned
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default VM folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    # NOTE(review): if neither base_vm_name, placement['folder'], nor
    # datacenter is supplied, folder_object is never assigned and this raises
    # NameError — confirm callers always provide at least one of them
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        # Strictest placement: a specific host; use that host's resource pool.
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The host does not expose 'resourcePool' directly (e.g. it is
            # part of a cluster); traverse up to the cluster's resource pool.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: this message previously read placement['host'], which is
            # not present in this branch and raised a KeyError instead of the
            # intended error.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a long integer.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    # Multiplier from each supported unit to kibibytes (vCenter needs a
    # long/int value).
    kb_per_unit = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    multiplier = kb_per_unit.get(unit.lower())
    if multiplier is None:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': int(size * multiplier), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by its name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    # Map the requested action to the corresponding API call; the fault
    # handling is identical for both operations, so it is written only once
    # below (previously it was duplicated per branch).
    if action == 'on':
        power_method = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_method = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_method()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will ne placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Assemble the CreateVM_Task keyword arguments; the host is only passed
    # when a valid vim.HostSystem was supplied.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object
    '''
    # Assemble the RegisterVM_Task keyword arguments once; the host is
    # optional and only included when supplied.
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Reconfigures the virtual machine with the given config spec and waits
    for the reconfiguration task to complete.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return wait_for_task(task, vm_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine and waits for the destroy task to finish.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory.

    Unlike ``delete_vm`` this calls ``UnregisterVM``, which removes the
    virtual machine from the inventory without deleting its files from the
    datastore.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed: the trace message previously said 'Destroying vm' (copy-pasted
    # from delete_vm), which was misleading for an unregister operation.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log the traceback before re-raising, consistent with every other
        # fault handler in this module (it was missing here).
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_diskgroups
|
python
|
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in an ESXi host, filtered by their canonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disk groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # Nothing to filter on; return early rather than query the host.
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is matched by the canonical name of its (single) cache
    # disk (the ``ssd`` member of the mapping).
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
|
Returns a list of vim.VsanHostDiskMapping objects representing disks
in an ESXi host, filtered by their canonical names.
host_ref
The vim.HostSystem object representing the host that contains the
requested disks.
cache_disk_ids
The list of canonical names of the cache disks to be retrieved. The
canonical name of the cache disk is enough to identify the disk group
because it is guaranteed to have one and only one cache disk.
Default is None.
get_all_disk_groups
Specifies whether to retrieve all disks groups in the host.
Default value is False.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2867-L2924
|
[
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    # NOTE(security): the command line below is built by string
    # interpolation, including the password. Quotes or shell metacharacters
    # in the password (or other arguments) can break out of the
    # single-quoted string, and the password is visible in the process list
    # while the command runs. Consider passing an argv list / credstore
    # instead.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Supports the ``userpass`` mechanism (username/password) and the ``sspi``
    mechanism (Kerberos via a GSSAPI token built from principal/domain).
    Falls back to unverified SSL contexts when certificate verification
    fails, mirroring the behavior of older salt releases.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Fixed: ``TypeError`` has no ``message`` attribute on Python 3, so
        # the previous ``exc.message`` access raised AttributeError inside
        # this handler. Use the exception's string representation instead,
        # which works on both Python 2 and 3.
        if 'unexpected keyword argument' in six.text_type(exc):
            log.error('Initial connect to the VMware endpoint failed with %s', exc)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # Retry with an unverified SSL context.
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: TLSv1 context with verification disabled.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is cleaned up when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Return the first object matching the requested name, or None.
    return next((entry for entry in container.view if entry.name == obj_name),
                None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # GetSi() returns the service instance pyVim cached for this process, if
    # any; reuse it when it still points at the requested host.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
                (hasattr(stub, 'host') and
                 stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and build a fresh connection.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    # Derive host (without port) and the session cookie from the existing
    # stub; the cookie is copied onto the new stub and advertised via the
    # request context — presumably so calls on the new stub reuse the
    # already-authenticated session (TODO confirm).
    hostname = stub.host.split(':')[0]
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    si = vim.ServiceInstance('ServiceInstance')
    # Reuse the managed object's SOAP stub so the new ServiceInstance shares
    # the same connection.
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Closes the connection to the vCenter server or ESXi host behind the
    given service instance.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Returns True when the connection points at a vCenter Server and False
    when it points at a standalone ESXi host, based on the API type
    advertised by the endpoint.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns the ``about`` information of the vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no switch with that name
             exists
    '''
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    return next((entry for entry in container.view if entry.name == dvs_name),
                None)
def _get_pnics(host_reference):
    '''
    Helper function that returns a list of PhysicalNics and their information.

    host_reference
        Host managed object (assumed vim.HostSystem) whose network config
        holds the physical NICs.
    '''
    return host_reference.config.network.pnic
def _get_vnics(host_reference):
    '''
    Helper function that returns a list of VirtualNics and their information.

    host_reference
        Host managed object (assumed vim.HostSystem) whose network config
        holds the virtual NICs.
    '''
    return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
    '''
    Helper function that returns a list of Virtual NicManagers
    and their information.

    host_reference
        Host managed object (assumed vim.HostSystem); the manager is read
        from its configManager.
    '''
    return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    # Build the Kerberos service name: principal/host@DOMAIN
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # NOTE(review): in practice only one step is ever performed — the
        # function returns as soon as step() yields an output token, and
        # in_token is never updated, so a genuine multi-step handshake would
        # not progress. Confirm this matches the intended single-leg use.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            # On Python 3 the token must be bytes before base64-encoding.
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance
    is a HostAgent type.

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Hardware grains are only collected when connected directly to an ESXi
    # host (apiType 'HostAgent'); other connections yield an empty dict.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        # Container view over all HostSystem objects; on a HostAgent
        # connection this holds the single managed host (view.view[0]).
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # System / vendor identification.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            # Product/OS information for the ESXi host itself.
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is reported in bytes; converted here to MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            # Per-interface IP/MAC grains, first from the virtual NICs ...
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            # Host / domain naming from the host's DNS configuration; the
            # fqdn only appends a dot when a domain name is configured.
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # ... then MAC addresses of the physical NICs.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # NOTE(review): the container view is only dereferenced here, not
        # explicitly destroyed via DestroyView — confirm whether relying on
        # session cleanup is intended.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the full content (inventory) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Return the inventory root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises VMwareApiError / VMwareRuntimeError on API failures.
    '''
    try:
        log.trace('Retrieving root folder')
        content = service_instance.RetrieveContent()
        return content.rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties used to return even more
        filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    # (all=True retrieves every property when no explicit list is given)
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view created above (only when this function
    # created it; a caller-supplied traversal spec means no view was made)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose ``property_name`` value
    equals ``property_value`` (or whose stringified moref id matches it), or
    None when there is no match.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value to match against.

    property_name
        An object property used to match the reference. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under.
    '''
    # Fetch every candidate of the requested type with just the one property.
    candidates = get_mors_with_properties(service_instance, object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # Also allow matching on the stringified moref id (quotes stripped).
        candidate_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == candidate_id:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Return a list of dicts, one per managed object of the given type. Each
    dict maps the retrieved property names to their values and holds the
    managed object reference itself under the ``object`` key.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more
        filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. If not
        specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''
    def _fetch_content():
        return get_content(service_instance, object_type,
                           property_list=property_list,
                           container_ref=container_ref,
                           traversal_spec=traversal_spec,
                           local_properties=local_properties)

    # A dropped HTTP connection (BadStatusLine / broken pipe) is retried
    # exactly once; any other IOError is re-raised.
    try:
        content = _fetch_content()
    except BadStatusLine:
        content = _fetch_content()
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = _fetch_content()

    object_list = []
    for obj in content:
        properties = {prop.name: prop.val for prop in obj.propSet}
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Return the requested properties of a managed object, retrieved through
    the property collector.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises VMwareApiError when the properties could not be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First fetch the object's name so error/log messages can identify it.
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        # Not every managed object type exposes a 'name' property.
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    entries = get_mors_with_properties(service_instance,
                                       type(mo_ref),
                                       container_ref=mo_ref,
                                       property_list=properties,
                                       local_properties=True)
    if not entries:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return entries[0]
def get_managed_object_name(mo_ref):
    '''
    Return the ``name`` property of a managed object, or None when the
    property is absent.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device object for the given type
    name.

    adapter_type
        The adapter type name; one of ``vmxnet``, ``vmxnet2``, ``vmxnet3``,
        ``e1000`` or ``e1000e``.
        (Docstring previously documented a misspelled ``adpater_type``.)

    Raises ValueError for an unknown type name.
    '''
    # Dispatch table from type names to the corresponding device classes.
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type not in adapter_classes:
        raise ValueError('An unknown network adapter object type name.')
    return adapter_classes[adapter_type]()
def get_network_adapter_object_type(adapter_object):
    '''
    Return the network adapter type name for a virtual device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ValueError for an unrecognized device object.
    '''
    # Order matters: the more specific device classes are tested before the
    # more generic ones, preserving the original isinstance ordering.
    ordered_checks = [
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    ]
    for device_class, type_name in ordered_checks:
        if isinstance(adapter_object, device_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Return the distributed virtual switches (DVSs) of a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # DVSs live under the datacenter's network folder: traverse into the
    # folder and then over its children.
    folder_traversal = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_traversal])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Return the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises VMwareObjectRetrievalError when the folder cannot be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Traverse directly into the datacenter's networkFolder property.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.Datacenter,
        path='networkFolder',
        skip=False)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Create a distributed virtual switch (DVS) in a datacenter and wait for
    the creation task to finish.

    Note: the created DVS reference is not returned; the function returns
    None once the task completes.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        # No config spec supplied: build one carrying only the DVS name.
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    network_folder = get_network_folder(dc_ref)
    try:
        create_task = network_folder.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(create_task, dvs_name, six.text_type(create_task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Apply a config spec to a distributed virtual switch and wait for the
    reconfiguration task to finish.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        reconfig_task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(reconfig_task, dvs_name, six.text_type(reconfig_task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enable or disable network I/O control (NIOC) on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    nioc_dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, nioc_dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Return distributed virtual portgroups (dvportgroups).

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises ArgumentValueError for an unsupported parent type.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Portgroups of a datacenter live under its network folder.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # Parent is a DVS: its portgroups are reachable directly.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Return the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference.

    Raises VMwareObjectRetrievalError when no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup carries the 'SYSTEM/DVS.UPLINKPG' tag.
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        tags = entry['tag']
        if tags and [t for t in tags if t.key == 'SYSTEM/DVS.UPLINKPG']:
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Create a distributed virtual portgroup on a distributed virtual switch
    (dvs) and wait for the creation task to finish.

    dvs_ref
        The dvs reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec).
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        create_task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(create_task, dvs_name, six.text_type(create_task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Update a distributed virtual portgroup and wait for the reconfiguration
    task to finish.

    portgroup_ref
        The portgroup reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec) to apply.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Remove a distributed virtual portgroup and wait for the destroy task to
    finish.

    portgroup_ref
        The portgroup reference.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        destroy_task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, pg_name, six.text_type(destroy_task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Return standard-switch networks under a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The names of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicating whether to return all networks in the parent.
        Default is False.

    Raises ArgumentValueError when the parent is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks are children of the datacenter's network folder.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Return a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.
        (Docstring previously documented this parameter as ``object_type``,
        which did not match the signature.)

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    item_list = get_mors_with_properties(service_instance, vim_object, properties)
    # Only the 'name' property of each entry is surfaced to the caller.
    return [item['name'] for item in item_list]
def get_license_manager(service_instance):
    '''
    Return the license manager of a Service Instance.

    service_instance
        The Service Instance Object from which to obtain the license
        manager.
    '''
    log.debug('Retrieving license manager')
    try:
        return service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_license_assignment_manager(service_instance):
    '''
    Return the license assignment manager of a Service Instance.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises VMwareObjectRetrievalError when the manager is not available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Return the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        licenses = license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return licenses
def add_license(service_instance, key, description, license_manager=None):
    '''
    Add a license and return the resulting license object.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as a vSphere client license label.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        added_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return added_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ArgumentValueError when no entity_name is passed, and
    VMwareObjectRetrievalError on unexpected assignment results.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if entity_type == 'uuid' and len(assignments) > 1:
        # A vCenter (uuid) query is expected to yield exactly one
        # assignment. Fixed typo in the log message ('Unexpectectedly').
        log.trace('Unexpectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # log.exception added for consistency with the error handling
            # in the rest of this module.
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Lists the names of the datacenters visible through a service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    # Only the 'name' property is retrieved; filtering is done client side.
    # When get_all_datacenters is set the name filter is bypassed entirely.
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns the vim.Datacenter managed object with the given name.

    Raises a VMwareObjectRetrievalError when no datacenter with that name
    exists.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if matches:
        return matches[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder and returns the
    new vim.Datacenter reference.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    Raises a VMwareObjectRetrievalError when the cluster is not found.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Traverse datacenter -> hostFolder -> childEntity to reach the clusters
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    The created cluster reference is not returned.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    Blocks until the reconfigure task completes.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Lists the clusters visible through a service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Lists the datastore clusters (storage pods) visible through a service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Lists the datastores visible through a service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dict mapping each visible datastore name to its basic
    information: name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {name: list_datastore_full(service_instance, name)
            for name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    ``capacity``/``free``/``used`` are expressed in MiB and ``usage`` is a
    percentage of capacity.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    # str() + replace strips the quotes pyVmomi adds when stringifying values
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies like "'vim.HostSystem:host-123'" -- keep the
        # moid part after the first colon
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Returns the reference to the first object of the specified type whose
    name matches ``obj_name``, or None when no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    view = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((mor for mor in view.view if mor.name == obj_name), None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Returns the reference to the first object of the specified type whose
    managed object id matches ``obj_moid``, or None when no such object
    exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    view = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((mor for mor in view.view if mor._moId == obj_moid), None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    Search is best-effort: datastores where the directory is missing are
    silently skipped.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Datastore path syntax: '[<datastore name>] <directory>'
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Missing directory on this datastore is not an error
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible. Supported
        types: vim.HostSystem, vim.ClusterComputeResource, vim.Datacenter,
        vim.StoragePod and the root 'Datacenters' folder.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        # Translate backing disk ids into datastore names by inspecting the
        # VMFS volumes mounted on the host
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:

            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores

    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []

    log.trace('datastore_names = %s', datastore_names)

    # Use the default traversal spec
    # Each supported reference type needs its own traversal spec to reach
    # its datastores through the property collector
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used for the property collector query.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host, used for logging/errors. Retrieved if not given.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse host -> configManager.storageSystem to reach the storage system
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    # NOTE(review): 'systemFile' appears to be requested only to force the
    # property collector to return the object -- verify any cheap property
    # would do here
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The host's vim.HostStorageSystem.

    device_path
        Device path of the disk to inspect.
    '''
    try:
        # RetrieveDiskPartitionInfo takes a list of paths; we only query one
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The host's vim.HostStorageSystem used to compute the partition info.

    device_path
        Device path of the disk the partition is added to.

    partition_info
        Current vim.HostDiskPartitionInfo of the disk; must contain a free
        (type 'none') partition.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    # NOTE: 'layout' shares the partition objects with partition_info, so
    # mutating free_partition here also changes the layout sent to the API
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed: logging uses %-style lazy formatting; the previous '{0}'
    # placeholder was never substituted, so the value was not logged
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id and returns the new
    vim.Datastore reference.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved if not given.
        Default is None.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # The new partition consumes all remaining free space on the disk
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse host -> configManager.datastoreSystem
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore, using the datastore system of one of the hosts
    attached to it. (Docstring previously mis-described this function as
    creating a VMFS datastore.)

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    # 'info' is retrieved here but not used below
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host's datastore system can perform the removal;
    # the first one is used
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)

        if cluster_name:
            # Only hosts whose parent is the requested cluster qualify
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue

        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue

        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    Returns an empty list when the host reports no scsi luns.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    lun_ids_to_scsi_addr_map = \
        _get_scsi_address_to_lun_key_map(si, host_ref, storage_system,
                                         hostname)
    # Join scsi_address -> lun key with lun key -> lun object
    luns_to_key_map = {d.key: d for d in
                       get_all_luns(host_ref, storage_system, hostname)}
    return {scsi_addr: luns_to_key_map[lun_key] for scsi_addr, lun_key in
            six.iteritems(lun_ids_to_scsi_addr_map)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # No filters at all -> nothing to retrieve
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk as a vim.HostDiskPartitionInfo.

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Only scsi disks matching the requested canonical name qualify
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # Traverse from the HostSystem to its storage system to fetch the SCSI
    # LUNs; the target disk is selected from these by canonical name.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Validates that a disk group is composed of the expected disks.

    disk_group
        Disk group object exposing ``ssd`` (cache disk) and ``nonSsd``
        (capacity disks) members.

    cache_disk_id
        Canonical name expected for the group's cache (SSD) disk.

    capacity_disk_ids
        Canonical names expected for the capacity (non-SSD) disks;
        order is irrelevant.

    Raises ArgumentValueError when either check fails; returns True
    otherwise.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    actual_capacity = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity = sorted(capacity_disk_ids)
    if actual_capacity != expected_capacity:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity, expected_capacity))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the first ``cacheConfigurationInfo`` entry of the host's cache
    configuration manager if a host cache is configured on the specified
    host, otherwise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Traverse from the host to its cache configuration manager.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first datastore's cache configuration is returned (see
        # the TODO above the function).
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method

    Returns True once the configuration task has completed successfully.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the asynchronous configuration task finishes.
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    # Delegates to the generic lister with the vim.HostSystem managed
    # object type.
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names to match; ignored when ``get_all_resource_pools``
        is True

    datacenter_name
        Name of the datacenter where the resource pool is available.
        Default is None (search starts at the inventory root folder).

    get_all_resource_pools
        Boolean; when True every resource pool found is returned

    return
        List of vim.ResourcePool managed object references

    Raises VMwareObjectRetrievalError when no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = [pool['object'] for pool in resource_pools
                      if get_all_resource_pools or
                      pool['name'] in resource_pool_names]
    if not selected_pools:
        # Report the requested names, not the (empty) result list, so the
        # error actually identifies what could not be found. The original
        # formatted `selected_pools` here, which is always [] on this path.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    # Delegates to the generic lister with the vim.ResourcePool type.
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    # Delegates to the generic lister with the vim.Network type.
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    # Delegates to the generic lister with the vim.VirtualMachine type.
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    # Delegates to the generic lister with the vim.Folder type.
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    # Delegates to the generic lister with the vim.DistributedVirtualSwitch
    # type.
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    # Delegates to the generic lister with the vim.VirtualApp type.
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    # Delegates to the generic lister with the
    # vim.dvs.DistributedVirtualPortgroup type.
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.
        Note: the loop below actually polls roughly once per second
        regardless; this value only controls how often the waiting
        message is logged.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.

    Returns the task result on success; translates vim/vmodl faults into
    the corresponding salt VMware exceptions otherwise.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll until the task leaves the 'running'/'queued' states.
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep up to the next whole-second boundary relative to start_time,
        # keeping the polling cadence at ~1 Hz.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault so each known
        # type can be translated into the matching salt exception
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError when no machine named ``name`` is found
    and VMwareMultipleObjectsError when more than one matches.
    '''
    if datacenter and not parent_ref:
        # Restrict the search to the given datacenter.
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set covering hardware, storage, guest and runtime
        # information.
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # The list below holds two elements joined with a space; the original
        # used implicit string concatenation of a single element and produced
        # "...with thesame name..." (missing space).
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary; only the optional 'folder' key is used here

    base_vm_name
        Existing virtual machine name (for cloning)
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Cloning: reuse the folder of the existing (base) virtual machine.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        # An explicit folder name was requested; it must be unambiguous.
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    # NOTE(review): if none of the branches above ran (no base_vm_name, no
    # 'folder' key in placement and a falsy datacenter), folder_object is
    # unbound and the return below raises UnboundLocalError -- confirm that
    # callers always provide a datacenter.
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object to access vCenter

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            # Standalone hosts expose their resource pool directly.
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Clustered hosts do not have a 'resourcePool' property; walk up
            # to the cluster and take its resource pool instead.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # The original message referenced placement['host'] here, which
            # raised KeyError on this branch when no 'host' key was given and
            # reported the wrong object otherwise.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit; returns a dict holding
    the converted integer size and the unit ('KB').

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    # Multipliers from the supported units to kibibytes.
    unit_factors = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    factor = unit_factors.get(unit.lower())
    if factor is None:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    # vCenter needs a (long) integer value.
    return {'size': int(size * factor), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    try:
        if action == 'on':
            task_name = 'power on'
            task = virtual_machine.PowerOn()
        elif action == 'off':
            task_name = 'power off'
            task = virtual_machine.PowerOff()
        else:
            # Not a vim/vmodl fault, so this propagates straight through the
            # except clauses below.
            raise salt.exceptions.ArgumentValueError('The given action is not supported')
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    try:
        # The 'host' argument is only passed when a concrete HostSystem was
        # supplied; otherwise placement is left to the resource pool.
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)
    '''
    try:
        # asTemplate=False: register as a regular VM, not as a template.
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update

    Returns the result of the reconfiguration task.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfiguration task completes; vm_ref is rebound to
    # the task result here.
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine (via ``Destroy_Task``) and waits for the
    task to complete

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine, i.e. removes it from the inventory
    (unlike ``delete_vm`` this does not destroy the machine)

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # The previous message said 'Destroying', copied from delete_vm; this
    # function only unregisters the VM.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, consistently with delete_vm.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
_check_disks_in_diskgroup
|
python
|
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
'''
Checks that the disks in a disk group are as expected and raises
CheckError exceptions if the check fails
'''
if not disk_group.ssd.canonicalName == cache_disk_id:
raise salt.exceptions.ArgumentValueError(
'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
'\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd]
if sorted(non_ssd_disks) != sorted(capacity_disk_ids):
raise salt.exceptions.ArgumentValueError(
'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
''.format(sorted(non_ssd_disks),
sorted(capacity_disk_ids)))
log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
cache_disk_id)
return True
|
Checks that the disks in a disk group are as expected and raises
CheckError exceptions if the check fails
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2927-L2944
| null |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param cmd: esxcli command and arguments
    :param protocol: Connection protocol; defaults to 'https'
    :param port: TCP port; defaults to 443
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    # NOTE(review): the password (and other arguments) are interpolated into
    # a single shell command string; a password containing a single quote
    # breaks the quoting. Consider passing an argument list instead.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        Username to connect with; mandatory when ``mechanism`` is ``userpass``.

    password
        Password to connect with; mandatory when ``mechanism`` is ``userpass``.

    protocol
        Connection protocol (e.g. ``https``).

    port
        Connection TCP port.

    mechanism
        Authentication mechanism: ``userpass`` or ``sspi``.

    principal
        Kerberos service principal; mandatory when ``mechanism`` is ``sspi``.

    domain
        Kerberos user domain; mandatory when ``mechanism`` is ``sspi``.

    Raises ``CommandExecutionError`` on invalid/missing login parameters and
    ``VMwareConnectionError`` when the connection cannot be established.
    '''
    log.trace('Retrieving new service instance')
    token = None
    # Validate the parameters required by the chosen login mechanism.
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # NOTE(review): 'exc.message' only exists on Python 2 exceptions; on
        # Python 3 this handler would itself raise AttributeError. Also, when
        # the condition is false the TypeError is silently swallowed and
        # 'service_instance' stays unbound - confirm this is intended.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First fallback: retry with an unverified SSL context when the
            # failure was a certificate verification error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                    '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
                    '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Second fallback: an explicit TLSv1 context with
                # verification disabled.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is closed when the interpreter exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    # Return the first view entry whose name matches, or None when absent.
    return next((entry for entry in container.view if entry.name == obj_name),
                None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Try to reuse pyVim's module-level cached service instance first.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
                (hasattr(stub, 'host') and
                 stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # The cached session expired server-side; reconnect from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    ssl_context = None
    if sys.version_info[:3] > (2, 7, 8):
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE

    current_stub = service_instance._stub
    hostname = current_stub.host.split(':')[0]
    # The session cookie is the quoted portion of the stub's cookie header;
    # propagate it so the new stub shares the authenticated session.
    session_cookie = current_stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=ssl_context)
    new_stub.cookie = current_stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of the managed object, used only for logging. This field is
        optional.
    '''
    if not name:
        # NOTE(review): only reached when the caller explicitly passes a
        # falsy name - the default '<unnamed>' is truthy. In that case the
        # name is fetched from the managed object itself.
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a ServiceInstance that reuses the managed object's SOAP stub,
    # i.e. the same authenticated connection.
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Disconnect from the vCenter server or ESXi host.

    service_instance
        The Service Instance to disconnect from.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Return True when the connection is made to a vCenter Server and False
    when it is made to an ESXi host.

    service_instance
        The Service Instance whose endpoint type is inspected.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    # Anything else is an endpoint this module doesn't know how to handle.
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Return the ``AboutInfo`` of the vCenter or ESXi host.

    service_instance
        The Service Instance to query.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no such switch exists
    '''
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    return next((entry for entry in container.view if entry.name == dvs_name),
                None)
def _get_pnics(host_reference):
    '''
    Helper that returns the host's PhysicalNic list.
    '''
    network_config = host_reference.config.network
    return network_config.pnic
def _get_vnics(host_reference):
    '''
    Helper that returns the host's VirtualNic list.
    '''
    network_config = host_reference.config.network
    return network_config.vnic
def _get_vnic_manager(host_reference):
    '''
    Helper that returns the host's virtual NIC manager.
    '''
    config_manager = host_reference.configManager
    return config_manager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
    '''
    Return the portgroup with the given name on the dvs, or None.

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object
    '''
    return next((pg for pg in dvs.portgroup if pg.name == portgroup_name),
                None)
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return the portgroup with the given name on the dvs, or None.

    NOTE(review): despite its name, this helper performs the same plain
    name lookup as ``_get_dvs_portgroup`` - it does not check whether the
    portgroup is actually an uplink portgroup.

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object
    '''
    return next((pg for pg in dvs.portgroup if pg.name == portgroup_name),
                None)
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for a Kerberos connection.

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Returns the base64-encoded token.

    Raises ``ImportError`` when the gssapi library is unavailable and
    ``CommandExecutionError`` when no token could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    # Fixed log typo: 'gsspi' -> 'gssapi'.
    log.debug('Retrieving gssapi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # Drive the GSSAPI handshake; no server response is ever fed back
        # in here, so in_token stays None throughout.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            # A step that yields no token means the handshake failed.
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    Returns a dict of grain names to values; empty when the connection is
    not a HostAgent (i.e. not a direct ESXi connection).

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Grains are only collected when talking directly to an ESXi host.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # NOTE(review): only the first HostSystem in the view is
            # inspected; presumably a HostAgent connection exposes exactly
            # one host - confirm.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is reported in bytes; convert to MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Collect addressing info per virtual NIC (vmk interfaces).
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NIC MACs (may overwrite vnic entries with same device).
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the local reference to the container view.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (retrieved content) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Return the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view only when we created it ourselves above.
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Fetch every object of the requested type along with the one property.
    candidates = get_mors_with_properties(service_instance, object_type, property_list=[property_name], container_ref=container_ref)
    for candidate in candidates:
        # Also match against the stringified MOR id (quotes stripped).
        candidate_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if property_value in (candidate[property_name], candidate_id):
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    def _retrieve():
        # Single place to fetch the content so the retry logic below
        # stays readable.
        return get_content(service_instance, object_type,
                           property_list=property_list,
                           container_ref=container_ref,
                           traversal_spec=traversal_spec,
                           local_properties=local_properties)

    # Retry once on a dropped/stale HTTP connection (broken pipe or a
    # malformed status line); re-raise any other IOError.
    try:
        content = _retrieve()
    except BadStatusLine:
        content = _retrieve()
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = _retrieve()

    object_list = []
    for obj in content:
        entry = {prop.name: prop.val for prop in obj.propSet}
        entry['object'] = obj.obj
        object_list.append(entry)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Return specific properties of a managed object, retrieved optimally
    through the property collector.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises ``VMwareApiError`` when the properties could not be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First resolve the object's name, for logging/error messages only.
    try:
        name_results = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_results[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    results = get_mors_with_properties(service_instance,
                                       type(mo_ref),
                                       container_ref=mo_ref,
                                       property_list=properties,
                                       local_properties=True)
    if not results:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return results[0]
def get_managed_object_name(mo_ref):
    '''
    Return the name of a managed object, or None when the name property
    wasn't found.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device of the requested type.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e'.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Return the network adapter type name for an adapter device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.
    '''
    # The check order matters: subclasses must be tested before their base
    # classes (it mirrors the original if-chain order).
    type_checks = (
        ('vmxnet2', vim.vm.device.VirtualVmxnet2),
        ('vmxnet3', vim.vm.device.VirtualVmxnet3),
        ('vmxnet', vim.vm.device.VirtualVmxnet),
        ('e1000e', vim.vm.device.VirtualE1000e),
        ('e1000', vim.vm.device.VirtualE1000),
    )
    for type_name, adapter_class in type_checks:
        if isinstance(adapter_object, adapter_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Retrieve the distributed virtual switches (DVSs) of a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse Datacenter -> networkFolder -> childEntity to reach the DVSs.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvs_refs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvs_refs.append(entry['object'])
    return dvs_refs
def get_network_folder(dc_ref):
    '''
    Retrieve the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises ``VMwareObjectRetrievalError`` when the folder wasn't found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Create a distributed virtual switch (DVS) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Build a minimal create spec when the caller didn't supply one, and
    # always force the spec's name to the requested dvs_name.
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
    dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Reconfigure a distributed virtual switch with the given config spec.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enable or disable network I/O control (NIOC) on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Return distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Datacenter parent: descend through the network folder's children.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # DVS parent: its portgroups are reachable directly.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
           (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference

    Raises a ``VMwareObjectRetrievalError`` if no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by the presence of the
    # 'SYSTEM/DVS.UPLINKPG' tag among the portgroup's tags.
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    switch_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, switch_name)
    log.trace('spec = %s', spec)
    try:
        create_task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Block until vCenter reports the creation task as done
    wait_for_task(create_task, switch_name, six.text_type(create_task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in log message: 'portgrouo' -> 'portgroup'
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfiguration task as done
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Deletes a distributed virtual portgroup and waits for the destroy task
    to finish.

    portgroup_ref
        The portgroup reference
    '''
    name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', name)
    try:
        destroy_task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(destroy_task, name, six.text_type(destroy_task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.

    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.

    Raises an ``ArgumentValueError`` if ``parent_ref`` is not a
    vim.Datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks hang off the datacenter's network folder; traverse into the
    # folder's child entities to reach them.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of objects from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    return [entry['name'] for entry in
            get_mors_with_properties(service_instance, vim_object, properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        # Attribute access goes to the server, so it is wrapped in the
        # module's standard fault handling.
        return service_instance.content.licenseManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises a ``VMwareObjectRetrievalError`` if the manager is not present.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        assignment_mgr = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    if not assignment_mgr:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return assignment_mgr
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as a vSphere Client license label
    license_label = vim.KeyValue()
    license_label.key = 'VpxClientLicenseLabel'
    license_label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        added_license = license_manager.AddLicense(key, [license_label])
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return added_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises an ``ArgumentValueError`` if ``entity_name`` is not passed, and a
    ``VMwareObjectRetrievalError`` if the vCenter assignment lookup returns
    unexpected results.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    if entity_type == 'uuid' and len(assignments) > 1:
        # Fixed typo in log message: 'Unexpectectedly' -> 'Unexpectedly'
        log.trace('Unexpectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')

    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # log.exception added for consistency with the module's other
            # fault handlers
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns the names of the datacenters visible to the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    results = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        # Keep everything when get_all_datacenters is set; otherwise keep
        # only the requested names.
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            results.append(entry['object'])
    return results
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name

    Raises a ``VMwareObjectRetrievalError`` when no datacenter with the
    given name exists.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder and returns its reference.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        return root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved

    Raises a ``VMwareObjectRetrievalError`` if the cluster is not found in
    the datacenter.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's host folder; traverse through
    # the folder's child entities.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    datacenter_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, datacenter_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter and waits for the reconfiguration
    task to finish.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', name)
    try:
        reconfig_task = cluster_ref.ReconfigureComputeResource_Task(
            cluster_spec, modify=True)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(reconfig_task, name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns the names of the clusters visible to the given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns the names of the datastore clusters (storage pods) visible to
    the given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns the names of the datastores visible to the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dictionary mapping each datastore name to its basic
    information: name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {name: list_datastore_full(service_instance, name)
            for name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises a ``VMwareObjectRetrievalError`` if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    # NOTE(review): the .replace("'", "") calls presumably strip quote
    # characters from pyVmomi string representations -- verify.
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Divides by 1024 twice -- assumes the API reports sizes in bytes and
    # converts to MiB (TODO confirm against the vSphere API docs)
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Usage in percent; NOTE(review): raises ZeroDivisionError if capacity
    # converts to 0
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies to something like "vim.HostSystem:host-123";
        # keep only the moid after the first colon
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.
    Returns None when no object with that name exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Return the first matching entry, or None if nothing matches
    return next((entry for entry in container.view
                 if entry.name == obj_name), None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id.
    Returns None when no object with that id exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Return the first matching entry, or None if nothing matches
    return next((entry for entry in container.view
                 if entry._moId == obj_moid), None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore without the directory is not an error; just skip it
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Note: extends the caller-supplied datastore_names list in place
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    current_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", current_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used to query the host.

    host_ref
        Reference to the ESXi host (vim.HostSystem).

    hostname
        Host name used in logging/errors; retrieved from ``host_ref`` if not
        provided.

    Raises a ``VMwareObjectRetrievalError`` if no storage system is found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo.

    storage_system
        The host's vim.HostStorageSystem.

    device_path
        Path of the device to inspect.
    '''
    try:
        infos = storage_system.RetrieveDiskPartitionInfo(
            devicePath=[device_path])
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('partition_info = %s', infos[0])
    return infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec).

    storage_system
        The host's vim.HostStorageSystem.

    device_path
        Path of the device on which the partition is computed.

    partition_info
        Current vim.HostDiskPartitionInfo of the device.

    Raises a ``VMwareObjectNotFoundError`` if the disk has no free
    partition, and a ``VMwareNotFoundError`` if the newly computed
    partition cannot be identified.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed: the message used a '{0}' placeholder, but logging's lazy
    # formatting is %-style, so the value was never interpolated.
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore on a scsi disk and returns the reference to the
    newly created vim.Datastore.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDisk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved from ``host_ref`` if not
        provided. This argument is optional.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute the number and spec of the new partition that will back the
    # datastore, based on the disk's current partition layout
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    # Translate the pyVmomi faults into salt exceptions
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns the vim.HostDatastoreSystem of an ESXi host.

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises ``VMwareObjectRetrievalError`` when the datastore system cannot
    be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its config manager's datastore system
    spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(si,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            "Host's '{0}' datastore system was not retrieved".format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return entries[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The removal is performed through the datastore
    system of the first host the datastore is attached to.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises ``VMwareApiError`` if the datastore has no attached hosts or the
    removal call fails.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host can perform the removal; use the first one
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter, filtered by name and/or datacenter/cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    if datacenter_name:
        container = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # 'parent' is needed to verify cluster membership below
            properties.append('parent')
    else:
        # No datacenter given: search from the inventory root
        container = get_root_folder(service_instance)
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=container,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    selected = []
    for host in hosts:
        if cluster_name:
            # Keep only hosts that belong to the requested cluster
            parent = host['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host['name'] in host_names:
            selected.append(host['object'])
    return selected
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises ``VMwareObjectRetrievalError`` when the storage device info,
    multipath info or luns cannot be retrieved from the host.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiLun objects on a host. Returns an empty
    list if the host reports no scsi luns.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    Raises ``VMwareObjectRetrievalError`` when the storage system or the
    storage device info cannot be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # scsi address -> lun key
    key_by_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                   storage_system, hostname)
    # lun key -> lun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Compose the two maps: scsi address -> lun object
    addr_to_lun = {}
    for scsi_addr, lun_key in six.iteritems(key_by_addr):
        addr_to_lun[scsi_addr] = lun_by_key[lun_key]
    return addr_to_lun
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi addresses.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # Nothing requested, nothing to do
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # Translate the requested scsi addresses into lun keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
            si, host_ref, storage_system, hostname)
        disk_keys = [key for addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    matching = []
    for lun in get_all_luns(host_ref, storage_system):
        if not isinstance(lun, vim.HostScsiDisk):
            continue
        # Keep the disk when everything was requested, or it matches one of
        # the requested canonical names, or one of the resolved lun keys
        if (get_all_disks or
                (disk_ids and lun.canonicalName in disk_ids) or
                lun.key in disk_keys):
            matching.append(lun)
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in matching])
    return matching
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns the vim.HostDiskPartitionInfo describing all partitions on a disk.

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None.

    Raises ``VMwareObjectRetrievalError`` when no devices are found on the
    host or when the disk cannot be found.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Select the scsi disk with the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.

    Raises ``VMwareObjectRetrievalError`` when the host's devices or the
    requested disk cannot be found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Select the scsi disk with the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disk groups
    in a ESXi host, filtered by the canonical names of their cache disks.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.

    Raises ``VMwareObjectRetrievalError`` when the host's vsan config or
    storage info is missing.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # Nothing requested, nothing to do
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # Each disk group is identified by its (single) cache ssd
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the first cache configuration info entry if the host cache is
    configured on the specified host, otherwise returns None.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Retrieve the cache configuration manager from the host
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host. Returns True on success.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in mebibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    # The configuration is performed asynchronously; wait for the task
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    vim_type = vim.HostSystem
    return list_objects(service_instance, vim_type)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean

    return
        List of vim.ResourcePool managed object references

    Raises ``VMwareObjectRetrievalError`` if no matching resource pool is
    found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Restrict the search to the datacenter when one is given
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the *requested* names; previously this formatted the (empty)
        # result list, which made the error message useless for debugging
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    vim_type = vim.ResourcePool
    return list_objects(service_instance, vim_type)
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    vim_type = vim.Network
    return list_objects(service_instance, vim_type)
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vim_type = vim.VirtualMachine
    return list_objects(service_instance, vim_type)
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    vim_type = vim.Folder
    return list_objects(service_instance, vim_type)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    vim_type = vim.DistributedVirtualSwitch
    return list_objects(service_instance, vim_type)
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vim_type = vim.VirtualApp
    return list_objects(service_instance, vim_type)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    vim_type = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, vim_type)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed and returns the task result on success.
    On failure, the task's error fault is re-raised as the matching salt
    exception.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.
        NOTE(review): the poll loop below always sleeps to the next whole
        second; ``sleep_seconds`` only throttles the log message cadence.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll the task until it leaves the 'running'/'queued' states
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary relative to start_time
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; raise the stored fault so it can be
        # translated into the appropriate salt exception
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises ``VMwareObjectRetrievalError`` when no VM matches the name and
    ``VMwareMultipleObjectsError`` when more than one does.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Fixed: the previous message relied on implicit string concatenation
        # inside a one-element list and rendered as "...with thesame name..."
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the same name, '
            'please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises ``ArgumentValueError`` when neither a base VM, a folder placement
    nor a datacenter is supplied, and ``VMwareObjectRetrievalError`` /
    ``VMwareMultipleObjectsError`` on lookup failures.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Clone case: reuse the folder of the base virtual machine
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this path fell through to the return below and raised
        # an UnboundLocalError; raise an explicit, meaningful error instead
        raise salt.exceptions.ArgumentValueError(
            'Unable to retrieve a folder: no base VM, folder placement or '
            'datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object to access vCenter

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Tuple of (resource pool, placement object) where the placement object
        is the host or cluster the resource pool was derived from

    Raises ``VMwareObjectRetrievalError`` when the requested objects cannot
    be found or no placement was specified.
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    # Guard the placement=None default; 'x' in None would raise a TypeError
    # and mask the meaningful error below
    if placement is None:
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts don't expose 'resourcePool' directly;
            # traverse up to the compute resource and fetch it from there
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: previously this message referenced placement['host'],
            # which raised a KeyError when only 'resourcepool' was given
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit and returns the result
    as a ``{'size': <int>, 'unit': 'KB'}`` dict.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    # Scale factors from each supported unit down to KB.
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    normalized = unit.lower()
    if normalized not in multipliers:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    # vCenter needs an integral (long) value, so truncate after scaling.
    return {'size': int(size * multipliers[normalized]), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by its name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine, either ``on`` or ``off``

    Returns the ``virtual_machine`` reference on success.

    Raises ``salt.exceptions.ArgumentValueError`` for an unsupported action,
    ``VMwareApiError``/``VMwareRuntimeError`` on API faults and
    ``VMwarePowerOnError`` when the task fails on a missing file.
    '''
    # Resolve the requested operation up front so the API fault handling
    # below is shared instead of being duplicated in each branch.
    if action == 'on':
        power_method = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_method = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_method()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        # Block until the power task finishes (or surfaces a task error).
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from a config spec.

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Only pin the VM to a specific host when a real vim.HostSystem is given.
    task_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        task_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **task_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Wait for the creation task and hand back its result (the new VM).
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file.
    On success it returns the vim.VirtualMachine managed object reference.

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host
        Placement host of the virtual machine, vim.HostSystem object
    '''
    # Assemble the call arguments once; the placement host is optional.
    task_kwargs = {'path': vmx_path,
                   'name': name,
                   'asTemplate': False,
                   'pool': resourcepool_object}
    if host_object:
        task_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**task_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update

    Returns the result of the completed reconfigure task.

    Raises ``salt.exceptions.VMwareApiError`` on permission/API faults and
    ``salt.exceptions.VMwareRuntimeError`` on vmodl runtime faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfigure task completes (or raises a task error).
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine via ``Destroy_Task`` and waits for the task
    to complete.

    vm_ref
        Managed object reference of a virtual machine object

    Raises ``salt.exceptions.VMwareApiError`` on permission/API faults and
    ``salt.exceptions.VMwareRuntimeError`` on vmodl runtime faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the destroy task completes (or raises a task error).
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory.

    This calls ``UnregisterVM`` (inventory removal only); use ``delete_vm``
    to destroy the machine.

    vm_ref
        Managed object reference of a virtual machine object

    Raises ``salt.exceptions.VMwareApiError`` on permission/API faults and
    ``salt.exceptions.VMwareRuntimeError`` on vmodl runtime faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed copy-paste from delete_vm: this message used to say 'Destroying'.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before re-raising, consistent with the other VM helpers.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_host_cache
|
python
|
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, otherwise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # No manager given: traverse from the host to its cache config manager.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first cache configuration entry is returned.
        return results[0]['cacheConfigurationInfo'][0]
    else:
        # Manager supplied: read its cacheConfigurationInfo property directly.
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
|
Returns a vim.HostScsiDisk if the host cache is configured on the specified
host, otherwise returns None
host_ref
The vim.HostSystem object representing the host that contains the
requested disks.
host_cache_manager
The vim.HostCacheConfigurationManager object representing the cache
configuration manager on the specified host. Default is None. If None,
it will be retrieved in the method
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2948-L2985
|
[
"def get_mors_with_properties(service_instance, object_type, property_list=None,\n container_ref=None, traversal_spec=None,\n local_properties=False):\n '''\n Returns a list containing properties and managed object references for the managed object.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n\n object_type\n The type of content for which to obtain managed object references.\n\n property_list\n An optional list of object properties used to return even more filtered managed object reference results.\n\n container_ref\n An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,\n ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory\n rootFolder.\n\n traversal_spec\n An optional TraversalSpec to be used instead of the standard\n ``Traverse All`` spec\n\n local_properties\n Flag specigying whether the properties to be retrieved are local to the\n container. If that is the case, the traversal spec needs to be None.\n '''\n # Get all the content\n content_args = [service_instance, object_type]\n content_kwargs = {'property_list': property_list,\n 'container_ref': container_ref,\n 'traversal_spec': traversal_spec,\n 'local_properties': local_properties}\n try:\n content = get_content(*content_args, **content_kwargs)\n except BadStatusLine:\n content = get_content(*content_args, **content_kwargs)\n except IOError as exc:\n if exc.errno != errno.EPIPE:\n raise exc\n content = get_content(*content_args, **content_kwargs)\n\n object_list = []\n for obj in content:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n properties['object'] = obj.obj\n object_list.append(properties)\n log.trace('Retrieved %s objects', len(object_list))\n return object_list\n",
"def get_properties_of_managed_object(mo_ref, properties):\n '''\n Returns specific properties of a managed object, retrieved in an\n optimally.\n\n mo_ref\n The managed object reference.\n\n properties\n List of properties of the managed object to retrieve.\n '''\n service_instance = get_service_instance_from_managed_object(mo_ref)\n log.trace('Retrieving name of %s', type(mo_ref).__name__)\n try:\n items = get_mors_with_properties(service_instance,\n type(mo_ref),\n container_ref=mo_ref,\n property_list=['name'],\n local_properties=True)\n mo_name = items[0]['name']\n except vmodl.query.InvalidProperty:\n mo_name = '<unnamed>'\n log.trace('Retrieving properties \\'%s\\' of %s \\'%s\\'',\n properties, type(mo_ref).__name__, mo_name)\n items = get_mors_with_properties(service_instance,\n type(mo_ref),\n container_ref=mo_ref,\n property_list=properties,\n local_properties=True)\n if not items:\n raise salt.exceptions.VMwareApiError(\n 'Properties of managed object \\'{0}\\' weren\\'t '\n 'retrieved'.format(mo_name))\n return items[0]\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n",
"def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):\n '''\n Retrieves the service instance from a managed object.\n\n me_ref\n Reference to a managed object (of type vim.ManagedEntity).\n\n name\n Name of managed object. This field is optional.\n '''\n if not name:\n name = mo_ref.name\n log.trace('[%s] Retrieving service instance from managed object', name)\n si = vim.ServiceInstance('ServiceInstance')\n si._stub = mo_ref._stub\n return si\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    # Guard clause: refuse to load with an explanatory message when the
    # pyVmomi import at module scope failed.
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param protocol: Connection protocol, defaults to ``https``
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary (output of ``cmd.run_all``), or False if the
             ``esxcli`` binary is not on PATH
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    # NOTE(review): host/user/pwd are interpolated into a single shell string;
    # a password containing a single quote would break or escape the quoting.
    # Consider an argv-list invocation - TODO confirm against
    # salt.modules.cmdmod.run_all semantics.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    # 'quiet' keeps the password out of the command-line debug logging.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        Location of the vCenter server or ESX/ESXi host.

    username / password
        Credentials; both mandatory for the ``userpass`` mechanism.

    protocol / port
        Connection protocol and port, passed through to ``SmartConnect``.

    mechanism
        Either ``userpass`` or ``sspi``.

    principal / domain
        Kerberos service principal and user domain; both mandatory for the
        ``sspi`` mechanism.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Fix: 'TypeError' has no 'message' attribute on Python 3; accessing
        # it unconditionally raised AttributeError and masked the real error.
        message = getattr(exc, 'message', six.text_type(exc))
        if 'unexpected keyword argument' in message:
            log.error('Initial connect to the VMware endpoint failed with %s', message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First fallback: retry with certificate verification disabled
            # when the failure was an SSL verification error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # Second fallback: some endpoints only negotiate over TLSv1 with
            # verification off; retry once more with an explicit context.
            if 'certificate verify failed' in six.text_type(exc):
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is closed when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Look the spec up by name via the customization spec manager; the
    # original code rebound its own parameter, which was confusing.
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.
    Returns None when no object of that name exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    content = get_inventory(si)
    # Recursive (True) container view over the whole inventory tree.
    container = content.viewManager.CreateContainerView(content.rootFolder, [obj_type], True)
    # First item whose name matches, or None.
    return next((mor for mor in container.view if mor.name == obj_name), None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Try to reuse pyVim's process-wide cached service instance first.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and build a fresh connection once.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    stub = service_instance._stub
    # Strip the port from 'host:port' to get the bare hostname.
    hostname = stub.host.split(':')[0]
    # Reuse the existing authenticated session by extracting its cookie value.
    session_cookie = stub.cookie.split('"')[1]
    # NOTE: this mutates pyVmomi's process-global request context so the new
    # stub's requests carry the vCenter session cookie.
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only for log messages. This field is
        optional.
    '''
    # NOTE(review): the default '<unnamed>' is truthy, so this branch only
    # runs when a caller explicitly passes a falsy name (None or '') - it is
    # effectively dead for callers using the default. Confirm intent.
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a ServiceInstance that shares the managed object's SOAP stub
    # (i.e. the same authenticated connection).
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises ``salt.exceptions.VMwareApiError`` on permission/API faults and
    ``salt.exceptions.VMwareRuntimeError`` on vmodl runtime faults.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises ``salt.exceptions.VMwareApiError`` for API faults or an unexpected
    api type, and ``salt.exceptions.VMwareRuntimeError`` on runtime faults.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' => vCenter, 'HostAgent' => standalone ESXi host.
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host (the ``AboutInfo``
    object exposed as ``content.about``).

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises ``salt.exceptions.VMwareApiError`` on permission/API faults and
    ``salt.exceptions.VMwareRuntimeError`` on vmodl runtime faults.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object, or None when
    no DVS of that name is known.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object
    '''
    # Bail out early when the name is not among the known switches.
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    view = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    # First DVS whose name matches, or None.
    return next((dvs for dvs in view.view if dvs.name == dvs_name), None)
def _get_pnics(host_reference):
    '''
    Helper function that returns a list of PhysicalNics and their information.

    host_reference
        The vim.HostSystem whose physical NICs are returned.
    '''
    return host_reference.config.network.pnic
def _get_vnics(host_reference):
    '''
    Helper function that returns a list of VirtualNics and their information.

    host_reference
        The vim.HostSystem whose virtual NICs are returned.
    '''
    return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
    '''
    Helper function that returns the host's virtual NIC manager
    (``configManager.virtualNicManager``).

    host_reference
        The vim.HostSystem whose virtual NIC manager is returned.
    '''
    return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    Returns the base64-encoded token on success; raises ImportError when the
    gssapi library is missing and CommandExecutionError when no token could
    be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    # NOTE(review): 'in_token' is never reassigned, so this loop returns or
    # raises on its first iteration: the first out_token is returned
    # immediately, and when step() yields nothing the 'not in_token' branch
    # raises. Confirm whether a multi-step exchange was intended.
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    Returns an empty dict when the endpoint is not a HostAgent (i.e. not a
    standalone ESXi host).

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        # Recursive container view over all HostSystems; for a HostAgent
        # endpoint only the first entry (view.view[0]) is used.
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grain is reported in MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Collect per-vnic IPv4/IPv6 addresses and MACs keyed by device.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host[.domain]; the dot is only added when a domain exists.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the local reference to the container view.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (ServiceContent) of a Service Instance Object.
    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    # Delegate straight to the vSphere API; the returned ServiceContent is
    # the root of the whole inventory tree.
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.
    service_instance
        The Service Instance Object for which to obtain the root folder.
    Raises ``VMwareApiError`` on vim faults and ``VMwareRuntimeError`` on
    vmodl runtime faults.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        # Surface the missing privilege so the operator knows what to grant
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.
    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html
    service_instance
        The Service Instance from which to obtain content.
    obj_type
        The type of content to obtain.
    property_list
        An optional list of object properties to used to return even more filtered content results.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.
    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    Raises ``VMwareApiError`` on vim faults and ``VMwareRuntimeError`` on
    vmodl runtime faults.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    # Tracks whether we created a temporary container view that has to be
    # destroyed at the end of this function
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.
    service_instance
        The Service Instance from which to obtain managed object references.
    object_type
        The type of content for which to obtain managed object references.
    property_value
        The name of the property for which to obtain the managed object reference.
    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    Returns None when nothing matched.
    '''
    # Fetch every candidate together with the single property we filter on
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for entry in candidates:
        # A match is either the property value itself or the stringified
        # managed object id (quotes stripped from the repr)
        moid = six.text_type(entry.get('object', '')).strip('\'"')
        if property_value in (entry[property_name], moid):
            return entry['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.
    service_instance
        The Service Instance from which to obtain managed object references.
    object_type
        The type of content for which to obtain managed object references.
    property_list
        An optional list of object properties used to return even more filtered managed object reference results.
    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec
    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content; retry once on transient connection hiccups
    # (BadStatusLine or a broken pipe), since the underlying HTTP session
    # may have been dropped by the server.
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            # Bare raise preserves the original traceback
            raise
        content = get_content(*content_args, **content_kwargs)
    # Flatten each result into a plain dict of property name -> value,
    # keeping the managed object reference under the 'object' key
    object_list = []
    for obj in content:
        properties = {prop.name: prop.val for prop in obj.propSet}
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.
    mo_ref
        The managed object reference.
    properties
        List of properties of the managed object to retrieve.
    Raises ``VMwareApiError`` if no properties could be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    # The object's name is fetched first purely for error reporting; fall
    # back to a placeholder when this type has no 'name' property.
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.
    If the name wasn't found, it returns None.
    mo_ref
        The managed object reference.
    '''
    # Only the 'name' property is needed; .get() yields None when absent.
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return the network adapter type.
    adapter_type
        The adapter type name from which to obtain the network adapter
        object. One of ``vmxnet``, ``vmxnet2``, ``vmxnet3``, ``e1000``,
        ``e1000e``.
    Raises ``ValueError`` on an unknown adapter type name.
    '''
    # Table dispatch: map the string identifier to the vim device class and
    # instantiate a fresh device object on each call.
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name for a given adapter object.
    adapter_object
        The adapter object from which to obtain the network adapter type.
    Raises ``ValueError`` when the object is not a known adapter type.
    '''
    # NOTE(review): the isinstance checks are ordered most-specific first --
    # presumably VirtualVmxnet2/3 subclass VirtualVmxnet and VirtualE1000e
    # subclasses VirtualE1000, so the subclass tests must come before the
    # base-class tests (TODO confirm against pyVmomi's class hierarchy).
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2):
        return 'vmxnet2'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3):
        return 'vmxnet3'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet):
        return 'vmxnet'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000e):
        return 'e1000e'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000):
        return 'e1000'
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.
    dc_ref
        The parent datacenter reference.
    dvs_names
        The names of the DVSs to return. Default is None.
    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    Returns an empty list when neither filter selects anything.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    properties = ['name']
    # Traverse datacenter -> networkFolder -> childEntity so only entities
    # directly under the datacenter's network folder are considered
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualSwitch,
                                      container_ref=dc_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_dvss or (dvs_names and i['name'] in dvs_names)]
    return items
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter
    dc_ref
        The parent datacenter reference.
    Raises ``VMwareObjectRetrievalError`` when the folder can't be found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Follow only the datacenter's 'networkFolder' property; skip=False keeps
    # the folder itself in the result set
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete. Returns None (the task result is
    discarded).
    dc_ref
        The parent datacenter reference.
    dvs_name
        The name of the DVS to create.
    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None.
    Raises ``VMwareApiError`` on vim faults and ``VMwareRuntimeError`` on
    vmodl runtime faults.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Fill in a minimal spec when none was supplied; the name in the config
    # spec is always forced to dvs_name
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the creation task as complete
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and waits for
    the reconfiguration task to complete.
    dvs_ref
        The DVS reference.
    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    Raises ``VMwareApiError`` on vim faults and ``VMwareRuntimeError`` on
    vmodl runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfiguration task as complete
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (network I/O control) is enabled on a DVS.
    dvs_ref
        The DVS reference.
    enabled
        Flag specifying whether NIOC is enabled.
    Raises ``VMwareApiError`` on vim faults and ``VMwareRuntimeError`` on
    vmodl runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        # Synchronous API call -- no task to wait for
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.
    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.
    portgroup_names
        The names of the portgroups to return. Default is None.
    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    Raises ``ArgumentValueError`` when the parent has an unsupported type.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    # The traversal path depends on the parent type: datacenters are walked
    # through their network folder, switches expose portgroups directly
    if isinstance(parent_ref, vim.Datacenter):
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs)
    dvs_ref
        The dvs reference
    Raises ``VMwareObjectRetrievalError`` if no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by its 'SYSTEM/DVS.UPLINKPG' tag
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.
    dvs_ref
        The dvs reference
    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    Raises ``VMwareApiError`` on vim faults and ``VMwareRuntimeError`` on
    vmodl runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the creation task as complete
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.
    portgroup_ref
        The portgroup reference
    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    Raises ``VMwareApiError`` on vim faults and ``VMwareRuntimeError`` on
    vmodl runtime faults.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message ('portgrouo' -> 'portgroup')
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfiguration task as complete
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.
    portgroup_ref
        The portgroup reference
    Raises ``VMwareApiError`` on vim faults and ``VMwareRuntimeError`` on
    vmodl runtime faults.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the destroy task as complete
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.
    parent_ref
        The parent object reference. A datacenter object.
    network_names
        The name of the standard switch networks. Default is None.
    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    Raises ``ArgumentValueError`` when the parent is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity so only networks
    # directly under the datacenter's network folder are considered
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.
    service_instance
        The Service Instance for which to obtain a list of objects.
    vim_object
        The type of content for which to obtain information.
    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Only the 'name' of each retrieved object is reported back, so 'name'
    # must always be part of the requested properties
    return [item['name']
            for item in get_mors_with_properties(service_instance,
                                                 vim_object,
                                                 properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.
    service_instance
        The Service Instance Object from which to obtain the license manager.
    Raises ``VMwareApiError`` on vim faults and ``VMwareRuntimeError`` on
    vmodl runtime faults.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.
    service_instance
        The Service Instance Object from which to obtain the license manager.
    Raises ``VMwareObjectRetrievalError`` if no assignment manager could be
    retrieved, ``VMwareApiError`` on vim faults and ``VMwareRuntimeError``
    on vmodl runtime faults.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.
    service_instance
        The Service Instance Object from which to obtain the licenses.
    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    Raises ``VMwareApiError`` on vim faults and ``VMwareRuntimeError`` on
    vmodl runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.
    service_instance
        The Service Instance Object.
    key
        The key of the license to add.
    description
        The description of the license to add.
    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    Returns the license object reported back by the API.
    Raises ``VMwareApiError`` on vim faults and ``VMwareRuntimeError`` on
    vmodl runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label the vSphere client displays
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.
    service_instance
        The Service Instance Object from which to obtain the licenses.
    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.
    entity_name
        Entity name used in logging. Required.
        Default is None.
    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    Raises ``ArgumentValueError`` when entity_name is missing,
    ``VMwareObjectRetrievalError`` on inconsistent assignment data,
    ``VMwareApiError`` on vim faults and ``VMwareRuntimeError`` on vmodl
    runtime faults.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # entity_name is guaranteed to be set at this point (validated
        # above), so the vCenter display name is always checked against the
        # returned assignment.
        check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
                license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if entity_type == 'uuid' and len(assignments) > 1:
        # A vCenter UUID should map to exactly one license assignment
        # (fixed typo: 'Unexpectectedly' -> 'Unexpectedly')
        log.trace('Unexpectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.
    service_instance
        The Service Instance Object from which to obtain the licenses.
    license_key
        The key of the license to add.
    license_name
        The description of the license to add.
    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.
    entity_name
        Entity name used in logging.
        Default is None.
    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    Returns the license object reported back by the API.
    Raises ``VMwareApiError`` on vim faults and ``VMwareRuntimeError`` on
    vmodl runtime faults.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before re-raising, consistent with every other handler in
            # this module (was missing here)
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.
    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    # Thin wrapper around the generic object lister
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.
    service_instance
        The Service Instance Object from which to obtain cluster.
    datacenter_names
        List of datacenter names to filter by. Default value is None.
    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    Returns an empty list when neither filter selects anything.
    '''
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.
    service_instance
        The Service Instance Object from which to obtain datacenter.
    datacenter_name
        The datacenter name
    Raises ``VMwareObjectRetrievalError`` when no such datacenter exists.
    '''
    # Reuse the plural lookup with a single-name filter
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder and returns its reference.
    .. versionadded:: 2017.7.0
    service_instance
        The Service Instance Object
    datacenter_name
        The datacenter name
    Raises ``VMwareApiError`` on vim faults and ``VMwareRuntimeError`` on
    vmodl runtime faults.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        # Synchronous API call -- returns the new datacenter directly
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.
    dc_ref
        The datacenter reference
    cluster
        The cluster to be retrieved
    Raises ``VMwareObjectRetrievalError`` when no such cluster exists.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Traverse datacenter -> hostFolder -> childEntity so only clusters
    # directly under the datacenter's host folder are considered
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on runtime faults.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        # Clusters are created under the datacenter's host folder
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter and blocks until the reconfiguration
    task completes.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True applies the spec incrementally instead of replacing
        # the whole cluster configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Reconfiguration is asynchronous; wait for the task to finish
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore clusters (storage pods) associated with a
    given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dict, keyed by datastore name, of basic information about
    every datastore associated with a given service instance:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # Delegate the per-datastore detail gathering to list_datastore_full
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises ``VMwareObjectRetrievalError`` when the datastore doesn't exist.
    '''
    ds_ref = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not ds_ref:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    summary = ds_ref.summary
    # capacity/freeSpace are reported in bytes; convert to MiB
    capacity_mb = summary.capacity / 1024 / 1024
    free_mb = summary.freeSpace / 1024 / 1024
    used_mb = capacity_mb - free_mb
    info = {
        'name': str(summary.name).replace("'", ""),
        'type': str(summary.type).replace("'", ""),
        'url': str(summary.url).replace("'", ""),
        'capacity': capacity_mb,
        'free': free_mb,
        'used': used_mb,
        'usage': (float(used_mb) / float(capacity_mb)) * 100,
        'hosts': [],
    }
    for mount in ds_ref.host:
        # The mount key looks like 'vim.HostSystem:host-<id>'; keep only
        # the moid part after the colon to resolve the host object
        host_moid = str(mount.key).replace("'", "").split(":", 1)[1]
        host_ref = get_mor_by_moid(service_instance, vim.HostSystem,
                                   host_moid)
        info['hosts'].append(host_ref.name)
    return info
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the managed object reference, or None when no object of the
    given type has that name.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Destroy the server-side container view; previously it was leaked
        # on every call, accumulating view objects on the vCenter/ESXi side
        container.Destroy()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object

    Returns the managed object reference, or None when no object of the
    given type has that moid.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Destroy the server-side container view; previously it was leaked
        # on every call, accumulating view objects on the vCenter/ESXi side
        container.Destroy()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # The search is asynchronous; each datastore browser returns a
            # task that is waited on below
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Datastores that don't contain the requested directory are
            # silently skipped (best-effort aggregation)
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # NOTE(review): extend() mutates the caller's datastore_names list
        # in place — confirm callers don't rely on it staying unchanged
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    # Each reference type needs a different traversal spec to reach its
    # 'datastore' property (or children, for folders/storage pods)
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises ``VMwareApiError`` on permission/API faults and
    ``VMwareRuntimeError`` on runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object.

    host_ref
        The vim.HostSystem whose storage system is retrieved.

    hostname
        Name of the host; looked up from ``host_ref`` when not provided.

    Raises ``VMwareObjectRetrievalError`` when the storage system cannot
    be retrieved.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    # The storage system hangs off the host's config manager
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition informations for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The host's storage system (vim.HostStorageSystem).

    device_path
        Path of the disk device whose partition info is queried.
    '''
    try:
        # RetrieveDiskPartitionInfo takes a list of device paths; only one
        # path is queried, so only the first result is relevant
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The host's storage system (vim.HostStorageSystem).

    device_path
        Path of the disk device the partition is added to.

    partition_info
        Current partition info (vim.HostDiskPartitionInfo) of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    # NOTE: the partition list is shared with partition_info.layout, so the
    # type change below is visible through both objects
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed: the previous '{0}' placeholder was never substituted because
    # the logging module uses %-style lazy formatting
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's storage system (vim.HostStorageSystem). Retrieved from
        ``host_ref`` when not provided. Default is None.

    Returns the reference of the newly created datastore.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # The new partition consumes the remaining free space on the disk
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises ``VMwareObjectRetrievalError`` when the datastore system cannot
    be retrieved.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # The datastore system hangs off the host's config manager
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return results[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore, via the datastore system of the first host it is
    attached to.

    (The previous docstring, 'Creates a VMFS datastore from a disk_id', was
    a copy/paste error.)

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    # Removal goes through a host's datastore system, so at least one
    # attached host is required
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    properties = ['name']
    if not datacenter_name:
        # Without a datacenter, search from the inventory root folder
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # 'parent' is needed below to check cluster membership
            properties.append('parent')
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for host in hosts:
        if cluster_name:
            # Drop hosts that aren't members of the requested cluster
            if not isinstance(host['parent'], vim.ClusterComputeResource):
                continue
            if get_managed_object_name(host['parent']) != cluster_name:
                continue
        if get_all_hosts or host['name'] in host_names:
            filtered_hosts.append(host['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises ``VMwareObjectRetrievalError`` when the storage device info,
    multipath info or luns cannot be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    luns = device_info.scsiLun
    if not luns:
        log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
        return []
    log.trace('Retrieved scsi luns in host \'%s\': %s',
              hostname, [lun.canonicalName for lun in luns])
    return luns
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # Build scsi address -> lun key and lun key -> lun object maps, then
    # compose them into scsi address -> lun object
    lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        si, host_ref, storage_system, hostname)
    lun_by_key = {lun.key: lun
                  for lun in get_all_luns(host_ref, storage_system, hostname)}
    return {scsi_addr: lun_by_key[lun_key]
            for scsi_addr, lun_key in six.iteritems(lun_key_by_scsi_addr)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # Nothing to filter on and not retrieving everything
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # Translate scsi addresses into lun keys so disks can be matched
        # on their 'key' attribute below
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
            si, host_ref, storage_system, hostname)
        disk_keys = [lun_key
                     for scsi_addr, lun_key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    matching_disks = []
    for lun in get_all_luns(host_ref, storage_system):
        if not isinstance(lun, vim.HostScsiDisk):
            continue
        if (get_all_disks or
                # Filter by canonical name
                (disk_ids and lun.canonicalName in disk_ids) or
                # Filter by disk keys from scsi addresses
                lun.key in disk_keys):
            matching_disks.append(lun)
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in matching_disks])
    return matching_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.

    Returns a vim.HostDiskPartitionInfo object; raises
    ``VMwareObjectRetrievalError`` when the disk cannot be found.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the scsi disk matching the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the scsi disk matching the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing the disk
    groups in an ESXi host, filtered by the canonical names of their cache
    disks.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disk groups.

    cache_disk_ids
        The list of canonical names of the cache disks of the disk groups to
        be retrieved. The canonical name of the cache disk is enough to
        identify the disk group because a group is guaranteed to have one
        and only one cache disk. Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disk groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # No cache disk ids to filter on, so nothing can match.
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A mapping's cache disk (dm.ssd) canonical name identifies the group.
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that a disk group contains exactly the expected cache disk and
    capacity disks.

    Raises salt.exceptions.ArgumentValueError when either check fails;
    returns True otherwise.
    '''
    # The cache disk uniquely identifies the group, so verify it first.
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # Capacity disks are compared as unordered sets of canonical names.
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host.

    host_ref
        The vim.HostSystem object representing the host on which the cache
        is configured.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache
        will be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If
        None, it will be retrieved in the method.

    Returns True on success.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        # Retrieve the cache configuration manager from the host itself.
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns the names of all ESXi hosts known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects.

    service_instance
        The service instance object used to query the vCenter

    resource_pool_names
        Names of the resource pools to retrieve

    datacenter_name
        Name of the datacenter where the resource pools are searched for.
        Default is None, in which case the search starts from the root
        folder.

    get_all_resource_pools
        Whether to return every resource pool found, regardless of name.
        Default is False.

    return
        List of vim.ResourcePool managed object references

    Raises VMwareObjectRetrievalError when no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = [pool['object'] for pool in resource_pools
                      if get_all_resource_pools or
                      pool['name'] in resource_pool_names]
    if not selected_pools:
        # Report the requested names; the previous code interpolated
        # ``selected_pools``, which is always an empty list here, making
        # the error message useless.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns the resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Returns the networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Returns the virtual machines associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Returns the folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Returns the distributed virtual switches associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain distributed
        virtual switches.
    '''
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Returns the vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Returns the distributed virtual portgroups associated with a given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed
        virtual portgroups.
    '''
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a vCenter/ESXi task to complete and returns its result, or
    raises a salt exception translated from the task's error.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging
        purposes.

    sleep_seconds
        The interval, in seconds, between "Waiting for ..." log messages.
        Note that the polling loop itself runs roughly once per second
        regardless of this value. Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Reading task.info can itself fault, so it is wrapped the same way as
    # the reads inside the polling loop below.
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary relative to start_time,
        # so time_counter approximates elapsed seconds.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault so the
        # except clauses below can translate it into a salt exception
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                # Append the first detailed fault message, if present.
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and
    properties list; returns the properties dict (including the ``object``
    key) of the matching virtual machine.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name. Used as the search container when ``parent_ref``
        is not given. Default is None.

    vm_properties
        List of vm properties. A sensible default set is used when None.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError when no VM matches, and
    VMwareMultipleObjectsError when more than one does.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # The original message used implicit string concatenation inside a
        # single list element, producing "with thesame name" (missing
        # space); joining two separate elements fixes it.
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary; a 'folder' entry selects a specific folder

    base_vm_name
        Existing virtual machine name (for cloning); when given, the VM's
        parent folder is returned. Default is None.

    Raises VMwareObjectRetrievalError when no folder can be determined.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this fell through to the return below with
        # folder_object unbound, raising an UnboundLocalError; fail with a
        # meaningful error instead.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unable to retrieve a folder: no base VM, folder placement or '
            'datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied; the
    strictest placement available is used.

    service_instance
        Service instance object used to query the vCenter

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info: cluster, host or resourcepool
        name. Default is None.

    return
        Tuple of (resource pool object, placement object); the placement
        object is the host or cluster the resource pool belongs to
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if not placement:
        # Avoid a TypeError on the membership tests below for a None
        # placement; the final else branch reports the proper error.
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                    get_properties_of_managed_object(host_objects[0],
                                                     properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose the pool directly; clustered hosts
            # require traversing up to the cluster's resource pool.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Reference the resource pool name here; the previous message
            # read placement['host'], which raised a KeyError in this
            # branch.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit; returns a dict with the
    converted integer size and the unit 'KB'.

    unit
        Unit of the size, e.g. GB; Note: to VMware a GB is the same as
        GiB = 1024MiB

    size
        Number which represents the size
    '''
    # vCenter needs an integral value, so the product is truncated via int().
    kb_multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    try:
        multiplier = kb_multipliers[unit.lower()]
    except KeyError:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': int(size * multiplier), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers a virtual machine on or off and waits for the operation to
    finish; returns the virtual machine object.

    virtual_machine
        vim.VirtualMachine object on which the power operation is performed

    action
        Either 'on' or 'off'; any other value raises ArgumentValueError
    '''
    # Select the power method first so the fault handling below is shared.
    if action == 'on':
        power_method = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_method = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_method()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from a config spec.

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    try:
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Creation can be slow, so log progress at 10s intervals at info level.
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file; on
    success it returns the vim.VirtualMachine managed object reference.

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool
        object

    host_object
        Placement host of the virtual machine, vim.HostSystem object.
        Default is None.
    '''
    try:
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # The vmx file was not found at the given datastore path.
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update

    return
        The result of the reconfiguration task
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine, deleting it from the inventory.

    vm_ref
        Managed object reference of the virtual machine to destroy
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory (its files stay on
    the datastore).

    vm_ref
        Managed object reference of the virtual machine to unregister
    '''
    vm_name = get_managed_object_name(vm_ref)
    # The previous docstring and log message said 'Destroying', copied from
    # delete_vm; this function only unregisters the VM.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, consistent with the sibling API wrappers.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
configure_host_cache
|
python
|
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
host_cache_manager=None):
'''
Configures the host cahe of the specified host
host_ref
The vim.HostSystem object representing the host that contains the
requested disks.
datastore_ref
The vim.Datastore opject representing the datastore the host cache will
be configured on.
swap_size_MiB
The size in Mibibytes of the swap.
host_cache_manager
The vim.HostCacheConfigurationManager object representing the cache
configuration manager on the specified host. Default is None. If None,
it will be retrieved in the method
'''
hostname = get_managed_object_name(host_ref)
if not host_cache_manager:
props = get_properties_of_managed_object(
host_ref, ['configManager.cacheConfigurationManager'])
if not props.get('configManager.cacheConfigurationManager'):
raise salt.exceptions.VMwareObjectRetrievalError(
'Host \'{0}\' has no host cache'.format(hostname))
host_cache_manager = props['configManager.cacheConfigurationManager']
log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
spec = vim.HostCacheConfigurationSpec(
datastore=datastore_ref,
swapSize=swap_size_MiB)
log.trace('host_cache_spec=%s', spec)
try:
task = host_cache_manager.ConfigureHostCache_Task(spec)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
wait_for_task(task, hostname, 'HostCacheConfigurationTask')
log.trace('Configured host cache on host \'%s\'', hostname)
return True
|
Configures the host cahe of the specified host
host_ref
The vim.HostSystem object representing the host that contains the
requested disks.
datastore_ref
The vim.Datastore opject representing the datastore the host cache will
be configured on.
swap_size_MiB
The size in Mibibytes of the swap.
host_cache_manager
The vim.HostCacheConfigurationManager object representing the cache
configuration manager on the specified host. Default is None. If None,
it will be retrieved in the method
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L2989-L3040
|
[
"def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):\n '''\n Waits for a task to be completed.\n\n task\n The task to wait for.\n\n instance_name\n The name of the ESXi host, vCenter Server, or Virtual Machine that\n the task is being run on.\n\n task_type\n The type of task being performed. Useful information for debugging purposes.\n\n sleep_seconds\n The number of seconds to wait before querying the task again.\n Defaults to ``1`` second.\n\n log_level\n The level at which to log task information. Default is ``debug``,\n but ``info`` is also supported.\n '''\n time_counter = 0\n start_time = time.time()\n log.trace('task = %s, task_type = %s', task, task.__class__.__name__)\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n while task_info.state == 'running' or task_info.state == 'queued':\n if time_counter % sleep_seconds == 0:\n msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n time.sleep(1.0 - ((time.time() - start_time) % 1.0))\n time_counter += 1\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. 
Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n if task_info.state == 'success':\n msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n # task is in a successful state\n return task_info.result\n else:\n # task is in an error state\n try:\n raise task_info.error\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.fault.SystemError as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareSystemError(exc.msg)\n except vmodl.fault.InvalidArgument as exc:\n log.exception(exc)\n exc_message = exc.msg\n if exc.faultMessage:\n exc_message = '{0} ({1})'.format(exc_message,\n exc.faultMessage[0].message)\n raise salt.exceptions.VMwareApiError(exc_message)\n",
"def get_properties_of_managed_object(mo_ref, properties):\n '''\n Returns specific properties of a managed object, retrieved in an\n optimally.\n\n mo_ref\n The managed object reference.\n\n properties\n List of properties of the managed object to retrieve.\n '''\n service_instance = get_service_instance_from_managed_object(mo_ref)\n log.trace('Retrieving name of %s', type(mo_ref).__name__)\n try:\n items = get_mors_with_properties(service_instance,\n type(mo_ref),\n container_ref=mo_ref,\n property_list=['name'],\n local_properties=True)\n mo_name = items[0]['name']\n except vmodl.query.InvalidProperty:\n mo_name = '<unnamed>'\n log.trace('Retrieving properties \\'%s\\' of %s \\'%s\\'',\n properties, type(mo_ref).__name__, mo_name)\n items = get_mors_with_properties(service_instance,\n type(mo_ref),\n container_ref=mo_ref,\n property_list=properties,\n local_properties=True)\n if not items:\n raise salt.exceptions.VMwareApiError(\n 'Properties of managed object \\'{0}\\' weren\\'t '\n 'retrieved'.format(mo_name))\n return items[0]\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port (defaults to 443)
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: Dictionary (the ``cmd.run_all`` result: stdout/stderr/retcode),
             or False when the esxcli binary is not on PATH
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    # NOTE(review): the password is interpolated into the command string
    # below, so it is briefly visible in the process list, and a password
    # containing a single quote would break the quoting. The
    # output_loglevel='quiet' in run_all keeps it out of the logs at least.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting through a vCenter ('-s host'); '-h' targets the ESXi
        # host on which the command actually runs.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        Location of the vCenter server or ESX/ESXi host.

    username / password
        Credentials; both are mandatory when mechanism is ``userpass``.

    protocol / port
        Connection protocol and TCP port.

    mechanism
        Either ``userpass`` or ``sspi`` (Kerberos via gssapi).

    principal / domain
        Kerberos service principal and user domain; both are mandatory when
        mechanism is ``sspi``.
    '''
    log.trace('Retrieving new service instance')
    token = None
    # Validate the mechanism-specific parameters; for 'sspi' also obtain the
    # base64-encoded Kerberos token up front.
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Old pyVmomi releases do not accept the b64token/mechanism keyword
        # arguments; surface an actionable hint instead of a bare TypeError.
        # NOTE(review): exc.message is a Python 2 only attribute -- on
        # Python 3 this branch would itself raise AttributeError; confirm.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First fallback: when the failure is an SSL certificate
            # verification error, retry with verification disabled.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Second fallback: retry over TLSv1 with verification off.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is closed when the interpreter exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Look up a VMware customization spec by name, for use when customizing
    a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    spec = si.content.customizationSpecManager.GetCustomizationSpec(
        name=customization_spec_name)
    return spec
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Return the managed object reference of the given type and name, or None
    when no object matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((entry for entry in container.view if entry.name == obj_name),
                None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    A cached connection (pyVim ``GetSi``) is reused when it still points at
    the same host and we are not running in a proxy minion; otherwise a new
    connection is established via ``_get_service_instance``.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Session expired on the server side; reconnect from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # NoPermission is handled before the generic fault handlers so the
    # privilege-specific message is produced.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection. The authenticated session cookie of
    the existing connection is reused, so no new login is performed.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    # stub.host is 'hostname:port'; keep only the hostname.
    hostname = stub.host.split(':')[0]
    # Extract the quoted session id from the cookie header -- assumes the
    # session value is the first double-quoted token (TODO confirm cookie
    # format across pyVmomi versions).
    session_cookie = stub.cookie.split('"')[1]
    # Propagate the vCenter session to the new endpoint.
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only for logging. This field is
        optional; when falsy, the managed object's own name is used.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    si = vim.ServiceInstance('ServiceInstance')
    # Reuse the managed object's SOAP stub (and thus its authenticated
    # session) rather than opening a new connection.
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises ``VMwareApiError`` on API faults and ``VMwareRuntimeError`` on
    runtime faults, wrapping the underlying pyVmomi message.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    # NoPermission is checked before the generic fault handlers so the
    # privilege-specific message wins; keep this ordering.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises ``VMwareApiError``/``VMwareRuntimeError`` when the about info
    cannot be read, and ``VMwareApiError`` for an unrecognized apiType.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' identifies a vCenter Server; 'HostAgent' identifies a
    # standalone ESXi host.
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host (the ``about`` object:
    product name, version, build, apiType, ...).

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises ``VMwareApiError``/``VMwareRuntimeError`` on pyVmomi faults.
    '''
    try:
        return service_instance.content.about
    # NoPermission is checked first so the privilege-specific message wins.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object, or None when
    no switch of that name exists.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object
    '''
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    return next((dvs for dvs in container.view if dvs.name == dvs_name), None)
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object
    '''
    # NOTE(review): this body is identical to _get_dvs_portgroup above; it
    # matches against *all* portgroups on the DVS and does not restrict the
    # search to uplink portgroups. Confirm whether filtering (e.g. via
    # dvs.config.uplinkPortgroup) was intended before changing behavior.
    for portgroup in dvs.portgroup:
        if portgroup.name == portgroup_name:
            return portgroup
    return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Returns the first output token produced by the GSSAPI security context,
    base64-encoded. Raises ``ImportError`` when gssapi is unavailable and
    ``CommandExecutionError`` when no token could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # NOTE(review): in_token is never updated inside the loop, so this
        # performs at most one context step: either the first step yields an
        # out_token (returned below) or the 'no response' error is raised.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    Returns a dict of grain names to values; empty when the connection is
    not to an ESXi host (HostAgent) or no HostSystem is visible.

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(
            service_instance.RetrieveContent().rootFolder, [vim.HostSystem], True)
        if view and view.view:
            # Hoist the deeply nested managed-object lookups that are reused
            # many times below; each attribute access on a pyVmomi managed
            # object can trigger a server round-trip.
            host_ref = view.view[0]
            hardware = host_ref.hardware
            product = host_ref.summary.config.product
            net_sys = host_ref.configManager.networkSystem
            dns_config = net_sys.dnsConfig
            hw_grain_data['manufacturer'] = hardware.systemInfo.vendor
            hw_grain_data['productname'] = hardware.systemInfo.model
            for _data in hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = product.fullName
            hw_grain_data['osmanufacturer'] = product.vendor
            hw_grain_data['osrelease'] = product.version
            hw_grain_data['osbuild'] = product.build
            hw_grain_data['os_family'] = product.name
            hw_grain_data['os'] = product.name
            # memorySize is in bytes; grains report MiB
            hw_grain_data['mem_total'] = hardware.memorySize / 1024 / 1024
            hw_grain_data['biosversion'] = hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = product.productLineId
            hw_grain_data['num_cpu_sockets'] = hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in net_sys.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = dns_config.hostName
            hw_grain_data['domain'] = dns_config.domainName
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                dns_config.hostName,
                ('.' if dns_config.domainName else ''),
                dns_config.domainName)
            for _pnic in net_sys.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = host_ref.configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the container-view reference once we are done with it.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the retrieved content (inventory) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    return service_instance.RetrieveContent()
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises ``VMwareApiError``/``VMwareRuntimeError`` on pyVmomi faults.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    # NoPermission is checked first so the privilege-specific message wins.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        # NoPermission checked first so the privilege-specific message wins.
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view (only the one we created ourselves above;
    # a caller-supplied container_ref is left alone)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose ``property_name`` equals
    ``property_value`` (or whose stringified object id equals it); None when
    nothing matches.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match against.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    candidates = get_mors_with_properties(service_instance, object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # Also compare against the stringified MOR id (surrounding quotes
        # stripped) so callers may pass either a property value or an id.
        mor_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if property_value in (candidate[property_name], mor_id):
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Return a list of dicts, one per retrieved managed object: each dict maps
    the requested property names to their values and stores the managed
    object reference itself under the ``object`` key.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    # The SOAP connection can drop between calls; retry the retrieval once
    # on a stale connection (BadStatusLine) or a broken pipe (EPIPE).
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)
    object_list = []
    for obj in content:
        entry = {prop.name: prop.val for prop in obj.propSet}
        entry['object'] = obj.obj
        object_list.append(entry)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Return the requested properties of a managed object as a dict (also
    containing the reference under the ``object`` key).

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises ``VMwareApiError`` when the properties could not be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    # Fetch the object's name first, purely for logging/error messages.
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    prop_entries = get_mors_with_properties(service_instance,
                                            type(mo_ref),
                                            container_ref=mo_ref,
                                            property_list=properties,
                                            local_properties=True)
    if not prop_entries:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return prop_entries[0]
def get_managed_object_name(mo_ref):
    '''
    Return the ``name`` property of a managed object, or None when the
    object has no name.

    mo_ref
        The managed object reference.
    '''
    name_props = get_properties_of_managed_object(mo_ref, ['name'])
    return name_props.get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device object of the given type.

    adapter_type
        One of ``vmxnet``, ``vmxnet2``, ``vmxnet3``, ``e1000`` or
        ``e1000e``.

    Raises ``ValueError`` for an unrecognized adapter type name.
    '''
    # Dispatch table instead of an if/elif chain; each value is a device
    # class, instantiated only for the requested type.
    constructors = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    constructor = constructors.get(adapter_type)
    if constructor is None:
        raise ValueError('An unknown network adapter object type name.')
    return constructor()
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type string for a virtual device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ``ValueError`` when the object is none of the known adapter types.
    '''
    # NOTE: the isinstance checks are order-sensitive -- derived classes
    # must be tested before their base classes (VirtualVmxnet2 derives from
    # VirtualVmxnet, so testing Vmxnet first would misclassify it); keep
    # this ordering when editing.
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2):
        return 'vmxnet2'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3):
        return 'vmxnet3'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet):
        return 'vmxnet'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000e):
        return 'e1000e'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000):
        return 'e1000'
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Return the distributed virtual switches (DVSs) of a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Walk Datacenter -> networkFolder -> childEntity to reach the DVSs.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    wanted_names = set(dvs_names) if dvs_names else set()
    dvs_refs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or entry['name'] in wanted_names:
            dvs_refs.append(entry['object'])
    return dvs_refs
def get_network_folder(dc_ref):
    '''
    Return the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises ``VMwareObjectRetrievalError`` when the folder is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Traverse directly to the datacenter's networkFolder property.
    folder_traversal = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.Datacenter,
        path='networkFolder',
        skip=False)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=folder_traversal)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete.

    Note: contrary to what the original docstring claimed, the new DVS
    reference is NOT returned -- the function returns None once the
    creation task finishes.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a default spec named ``dvs_name``
        is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Build a minimal spec when none was supplied; note that a
    # caller-supplied spec that already has a configSpec is used as-is
    # (its name is not overwritten with dvs_name).
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    # NoPermission checked first so the privilege-specific message wins.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the vCenter task completes (raises on task failure).
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Applies a config spec to a distributed virtual switch and waits for the
    reconfiguration task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        reconfig_task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except (vim.fault.NoPermission, vim.fault.VimFault,
            vmodl.RuntimeFault) as exc:
        log.exception(exc)
        # Translate vSphere faults into salt exceptions, most specific first
        if isinstance(exc, vim.fault.NoPermission):
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        if isinstance(exc, vim.fault.VimFault):
            raise salt.exceptions.VMwareApiError(exc.msg)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(reconfig_task, dvs_name,
                  six.text_type(reconfig_task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether network I/O control (NIOC) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except (vim.fault.NoPermission, vim.fault.VimFault,
            vmodl.RuntimeFault) as exc:
        log.exception(exc)
        # Translate vSphere faults into salt exceptions, most specific first
        if isinstance(exc, vim.fault.NoPermission):
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        if isinstance(exc, vim.fault.VimFault):
            raise salt.exceptions.VMwareApiError(exc.msg)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).

    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises an ArgumentValueError if ``parent_ref`` is of an unsupported type.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    if isinstance(parent_ref, vim.Datacenter):
        # A datacenter doesn't expose portgroups directly: traverse through
        # its networkFolder into the folder's children.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        # A DVS exposes its portgroups through its 'portgroup' property.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference

    Raises a VMwareObjectRetrievalError if no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    si = get_service_instance_from_managed_object(dvs_ref)
    uplink_pgs = []
    for entry in get_mors_with_properties(si,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        # The uplink portgroup carries the SYSTEM/DVS.UPLINKPG tag
        if entry['tag'] and any(t.key == 'SYSTEM/DVS.UPLINKPG'
                                for t in entry['tag']):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the create task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        create_task = dvs_ref.CreateDVPortgroup_Task(spec)
    except (vim.fault.NoPermission, vim.fault.VimFault,
            vmodl.RuntimeFault) as exc:
        log.exception(exc)
        # Translate vSphere faults into salt exceptions, most specific first
        if isinstance(exc, vim.fault.NoPermission):
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        if isinstance(exc, vim.fault.VimFault):
            raise salt.exceptions.VMwareApiError(exc.msg)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(create_task, dvs_name, six.text_type(create_task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the reconfigure
    task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in log message ('portgrouo' -> 'portgroup')
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        destroy_task = portgroup_ref.Destroy_Task()
    except (vim.fault.NoPermission, vim.fault.VimFault,
            vmodl.RuntimeFault) as exc:
        log.exception(exc)
        # Translate vSphere faults into salt exceptions, most specific first
        if isinstance(exc, vim.fault.NoPermission):
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        if isinstance(exc, vim.fault.VimFault):
            raise salt.exceptions.VMwareApiError(exc.msg)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, pg_name,
                  six.text_type(destroy_task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.

    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.

    Raises an ArgumentValueError if ``parent_ref`` is not a vim.Datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks live under the datacenter's networkFolder; traverse through
    # it into the folder's children.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Each entry returned by get_mors_with_properties is a dict of the
    # requested properties; only the object names are of interest here.
    return [item['name'] for item in
            get_mors_with_properties(service_instance, vim_object,
                                     properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        return service_instance.content.licenseManager
    except (vim.fault.NoPermission, vim.fault.VimFault,
            vmodl.RuntimeFault) as exc:
        log.exception(exc)
        # Translate vSphere faults into salt exceptions, most specific first
        if isinstance(exc, vim.fault.NoPermission):
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        if isinstance(exc, vim.fault.VimFault):
            raise salt.exceptions.VMwareApiError(exc.msg)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises a VMwareObjectRetrievalError if the manager cannot be retrieved.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except (vim.fault.NoPermission, vim.fault.VimFault,
            vmodl.RuntimeFault) as exc:
        log.exception(exc)
        # Translate vSphere faults into salt exceptions, most specific first
        if isinstance(exc, vim.fault.NoPermission):
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        if isinstance(exc, vim.fault.VimFault):
            raise salt.exceptions.VMwareApiError(exc.msg)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except (vim.fault.NoPermission, vim.fault.VimFault,
            vmodl.RuntimeFault) as exc:
        log.exception(exc)
        # Translate vSphere faults into salt exceptions, most specific first
        if isinstance(exc, vim.fault.NoPermission):
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        if isinstance(exc, vim.fault.VimFault):
            raise salt.exceptions.VMwareApiError(exc.msg)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license and returns it.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as a label under the key used by the
    # vSphere client for license names.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        added_license = license_manager.AddLicense(key, [label])
    except (vim.fault.NoPermission, vim.fault.VimFault,
            vmodl.RuntimeFault) as exc:
        log.exception(exc)
        # Translate vSphere faults into salt exceptions, most specific first
        if isinstance(exc, vim.fault.NoPermission):
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        if isinstance(exc, vim.fault.VimFault):
            raise salt.exceptions.VMwareApiError(exc.msg)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return added_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises an ArgumentValueError when ``entity_name`` is not passed, and a
    VMwareObjectRetrievalError when the vCenter assignment lookup returns
    unexpected results.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # No reference: target the vCenter itself, identified by its
        # instance UUID; remember to verify the display name below.
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        # A concrete entity (e.g. cluster/host) is identified by its moid
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid lookup) is expected to carry exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        # Guard against answering for a different vCenter than requested
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # No entity specified: assign to the vCenter itself, identified by
        # its instance UUID.
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before translating the fault, consistent with the other
            # API wrappers in this module (was previously missing here)
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name

    Raises a VMwareObjectRetrievalError if the datacenter is not found.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter in the root folder and returns it.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except (vim.fault.NoPermission, vim.fault.VimFault,
            vmodl.RuntimeFault) as exc:
        log.exception(exc)
        # Translate vSphere faults into salt exceptions, most specific first
        if isinstance(exc, vim.fault.NoPermission):
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        if isinstance(exc, vim.fault.VimFault):
            raise salt.exceptions.VMwareApiError(exc.msg)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The name of the cluster to be retrieved

    Raises a VMwareObjectRetrievalError if the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's hostFolder; traverse through it
    # into the folder's children.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx). Required.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter and waits for the reconfiguration
    task to complete.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx). Required.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True applies the spec incrementally instead of replacing
        # the whole configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster (vim.StoragePod) names associated
    with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastore names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dictionary of datastores associated with a given service
    instance, keyed by datastore name. Each value contains the basic
    information for the datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises a VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Sizes are reported by the API in bytes; convert to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against a zero-capacity datastore (e.g. inaccessible), which
    # previously raised ZeroDivisionError
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key renders like "'vim.HostSystem:host-123'"; extract the moid
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # First object with a matching name, or None if there is none
    return next((obj for obj in container.view if obj.name == obj_name),
                None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # First object with a matching moid, or None if there is none
    return next((obj for obj in container.view if obj._moId == obj_moid),
                None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Search path uses datastore notation: '[datastore_name] dir'
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore without the directory simply contributes no results
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names or by the canonical names of
    their backing disks.

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible. Supported
        types: vim.HostSystem, vim.ClusterComputeResource, vim.Datacenter,
        vim.StoragePod, or the root 'Datacenters' vim.Folder.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.
        NOTE(review): when ``backing_disk_ids`` is also given, this list is
        extended in place with the matching datastore names, mutating the
        caller's list.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all datastores visible from the
        reference. Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        # Nothing matched the disk filter and no names were requested
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    current_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", current_name,
              new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except (vim.fault.NoPermission, vim.fault.VimFault,
            vmodl.RuntimeFault) as exc:
        log.exception(exc)
        # Translate vSphere faults into salt exceptions, most specific first
        if isinstance(exc, vim.fault.NoPermission):
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        if isinstance(exc, vim.fault.VimFault):
            raise salt.exceptions.VMwareApiError(exc.msg)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system.

    service_instance
        The Service Instance Object.

    host_ref
        Reference to the host (vim.HostSystem).

    hostname
        Name of the host used in logging/errors; retrieved when not provided.

    Raises a VMwareObjectRetrievalError if the storage system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # The storage system hangs off the host's configManager
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return entries[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo.
    '''
    try:
        partition_infos = storage_system.RetrieveDiskPartitionInfo(
            devicePath=[device_path])
    except (vim.fault.NoPermission, vim.fault.VimFault,
            vmodl.RuntimeFault) as exc:
        log.exception(exc)
        # Translate vSphere faults into salt exceptions, most specific first
        if isinstance(exc, vim.fault.NoPermission):
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        if isinstance(exc, vim.fault.VimFault):
            raise salt.exceptions.VMwareApiError(exc.msg)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # One device path in -> one partition info out
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem of the host that owns the disk.

    device_path
        Path of the disk device on which the partition is created.

    partition_info
        vim.HostDiskPartitionInfo describing the disk's current layout.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use lazy %-style formatting; '{0}' placeholders are not expanded by
    # the logging module, so the original call never logged the value
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem; retrieved from the host when not
        provided. This argument is optional.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a spec for a new vmfs partition covering the disk's free space
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Retrieves the vim.HostDatastoreSystem of an ESXi host.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host; resolved from ``host_ref`` when not supplied.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem child
    spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    found = get_mors_with_properties(service_instance,
                                     vim.HostDatastoreSystem,
                                     property_list=['datastore'],
                                     container_ref=host_ref,
                                     traversal_spec=spec)
    if not found:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'.format(
                hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return found[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore via the datastore system of its first attached host.
    (The original docstring incorrectly said "Creates a VMFS datastore".)

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        # A datastore with no attached hosts cannot be removed this way
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Removal goes through the datastore system of the first attached host
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    properties = ['name']
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # The parent property is needed to check cluster membership;
            # cluster filtering only makes sense with a datacenter set
            properties.append('parent')
    else:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    selected = []
    for host in hosts:
        if cluster_name:
            # Keep only hosts that are members of the requested cluster
            parent = host['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host['name'] in host_names:
            selected.append(host['object'])
    return selected
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.

        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    Returns an empty list when the host reports no scsi luns.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Builds a map of all vim.ScsiLun objects on an ESXi host keyed by their
    scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if storage_system is None or not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # scsi address -> lun key
    key_by_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                   storage_system, hostname)
    # lun key -> vim.ScsiLun
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Stitch the two maps together: scsi address -> vim.ScsiLun
    return {addr: lun_by_key[key]
            for addr, key in six.iteritems(key_by_addr)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # Nothing to filter on, so nothing can match
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk, as a vim.HostDiskPartitionInfo.

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Narrow the scsi luns down to the disk with the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Narrow the scsi luns down to the disk with the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # No filter and not all requested, so nothing can match
            return []
    try:
        # vsanHostConfig holds the host's VSAN storage configuration
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its (unique) cache/ssd disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails

    disk_group
        The vim.VsanHostDiskMapping object to validate.

    cache_disk_id
        Expected canonical name of the disk group's cache (ssd) disk.

    capacity_disk_ids
        Expected canonical names of the group's capacity (non-ssd) disks.

    Returns True when the disk group matches; raises ArgumentValueError
    otherwise.
    '''
    # Direct inequality is clearer than 'not x == y'
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    non_ssd_disks = [d.canonicalName for d in disk_group.nonSsd]
    # Compare as sorted lists so ordering differences don't matter
    if sorted(non_ssd_disks) != sorted(capacity_disk_ids):
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(sorted(non_ssd_disks),
                      sorted(capacity_disk_ids)))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, otherwise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Locate the cache configuration manager via the property collector
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first cache configuration is supported (see TODO above)
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def list_hosts(service_instance):
    '''
    List all ESXi hosts (vim.HostSystem) known to a service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    host_type = vim.HostSystem
    return list_objects(service_instance, host_type)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; when True all resource pools in the container are returned

    return
        List of vim.ResourcePool managed object references

    Raises VMwareObjectRetrievalError when no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search under the datacenter when given, otherwise under the root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the names that were requested; the original message
        # formatted the (always empty) result list instead
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    List all resource pools (vim.ResourcePool) known to a service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    pool_type = vim.ResourcePool
    return list_objects(service_instance, pool_type)
def list_networks(service_instance):
    '''
    List all networks (vim.Network) known to a service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    network_type = vim.Network
    return list_objects(service_instance, network_type)
def list_vms(service_instance):
    '''
    List all virtual machines (vim.VirtualMachine) known to a service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vm_type = vim.VirtualMachine
    return list_objects(service_instance, vm_type)
def list_folders(service_instance):
    '''
    List all folders (vim.Folder) known to a service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    folder_type = vim.Folder
    return list_objects(service_instance, folder_type)
def list_dvs(service_instance):
    '''
    List all distributed virtual switches (vim.DistributedVirtualSwitch)
    known to a service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    dvs_type = vim.DistributedVirtualSwitch
    return list_objects(service_instance, dvs_type)
def list_vapps(service_instance):
    '''
    List all vApps (vim.VirtualApp) known to a service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vapp_type = vim.VirtualApp
    return list_objects(service_instance, vapp_type)
def list_portgroups(service_instance):
    '''
    List all distributed virtual portgroups
    (vim.dvs.DistributedVirtualPortgroup) known to a service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    portgroup_type = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, portgroup_type)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.

    Returns the task result on success; on failure re-raises the task's
    fault translated into the corresponding salt VMware exception.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll until the task leaves the 'running'/'queued' states
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only log a progress message every `sleep_seconds` iterations
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep to the next whole-second boundary relative to start_time so
        # that time_counter tracks elapsed seconds
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state
        # Re-raise the task's fault locally so it can be translated into the
        # corresponding salt exception below
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                # Append the first detailed fault message when available
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set covering hardware, storage and guest state
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # The original message read 'with thesame name' because two adjacent
        # string literals were implicitly concatenated without a space
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the same name, '
            'please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises VMwareObjectRetrievalError when no folder can be determined from
    the given arguments.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # When cloning, place the new VM next to the base VM
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this path raised an obscure NameError because
        # folder_object was never assigned
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unable to retrieve folder: no base VM, folder placement or '
            'datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    The ``placement`` dictionary must contain exactly one of the keys
    ``host``, ``resourcepool`` or ``cluster``; the resource pool is resolved
    from the most specific object named.

    service_instance
        Service instance object used to query the vCenter/ESXi host

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Tuple of (resource pool object, placement object), where the placement
        object is the host or cluster the resource pool was resolved from.
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            # Fast path: read the host's resourcePool property directly.
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The property is not available on this host (e.g. clustered
            # hosts); traverse host -> parent cluster -> resourcePool instead.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # NOTE(review): message interpolates placement['host'] although
            # this branch matched on 'resourcepool' -- raises KeyError if
            # 'host' is absent; kept as-is (runtime string).
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified host {}.'.format(placement['host'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a long integer.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    # Multipliers that bring each supported unit down to kibibytes.
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    factor = multipliers.get(unit.lower())
    if factor is None:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    # vCenter needs long value
    return {'size': int(size * factor), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    # Resolve the requested operation first so the fault handling below is
    # written once instead of being duplicated per action.
    if action == 'on':
        power_method = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_method = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_method()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        # Block until the power task completes.
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference

    Raises VMwareApiError/VMwareRuntimeError when the API call fails.
    '''
    try:
        if host_object and isinstance(host_object, vim.HostSystem):
            # Pin the VM to a specific host when one was supplied.
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll the task every 10 seconds, logging progress at 'info' level.
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)

    Raises VMwareApiError/VMwareRuntimeError when the API call fails and
    VMwareVmRegisterError when the vmx file cannot be found.
    '''
    try:
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # A missing vmx file surfaces from the task as a file-not-found error.
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update

    Returns the result of the completed reconfigure task.
    Raises VMwareApiError/VMwareRuntimeError when the API call fails.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfigure task finishes.
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine, deleting its files from disk.

    vm_ref
        Managed object reference of a virtual machine object

    Raises VMwareApiError/VMwareRuntimeError when the API call fails.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the destroy task finishes.
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory (calls ``UnregisterVM``;
    unlike ``delete_vm`` this does not run a destroy task).

    vm_ref
        Managed object reference of a virtual machine object

    Raises VMwareApiError/VMwareRuntimeError when the API call fails.
    '''
    vm_name = get_managed_object_name(vm_ref)
    # NOTE(review): message says 'Destroying' although this only unregisters;
    # kept as-is because log output is runtime behavior.
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # NOTE(review): unlike delete_vm, these two branches do not call
        # log.exception before re-raising.
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_resource_pools
|
python
|
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; when True, all pools are returned regardless of name

    return
        List of resource pool managed object references

    Raises VMwareObjectRetrievalError when no pool matches.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Scope the search to a datacenter when one is given, otherwise search
    # the whole inventory from the root folder.
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # NOTE(review): the message interpolates selected_pools (always empty
        # here) rather than resource_pool_names; kept as-is (runtime string).
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(selected_pools,
                                                            get_all_resource_pools))
    return selected_pools
|
Retrieves resource pool objects
service_instance
The service instance object to query the vCenter
resource_pool_names
Resource pool names
datacenter_name
Name of the datacenter where the resource pool is available
get_all_resource_pools
Boolean
return
Resourcepool managed object reference
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L3053-L3097
|
[
"def get_datacenter(service_instance, datacenter_name):\n '''\n Returns a vim.Datacenter managed object.\n\n service_instance\n The Service Instance Object from which to obtain datacenter.\n\n datacenter_name\n The datacenter name\n '''\n items = get_datacenters(service_instance,\n datacenter_names=[datacenter_name])\n if not items:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Datacenter \\'{0}\\' was not found'.format(datacenter_name))\n return items[0]\n",
"def get_mors_with_properties(service_instance, object_type, property_list=None,\n container_ref=None, traversal_spec=None,\n local_properties=False):\n '''\n Returns a list containing properties and managed object references for the managed object.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n\n object_type\n The type of content for which to obtain managed object references.\n\n property_list\n An optional list of object properties used to return even more filtered managed object reference results.\n\n container_ref\n An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,\n ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory\n rootFolder.\n\n traversal_spec\n An optional TraversalSpec to be used instead of the standard\n ``Traverse All`` spec\n\n local_properties\n Flag specigying whether the properties to be retrieved are local to the\n container. If that is the case, the traversal spec needs to be None.\n '''\n # Get all the content\n content_args = [service_instance, object_type]\n content_kwargs = {'property_list': property_list,\n 'container_ref': container_ref,\n 'traversal_spec': traversal_spec,\n 'local_properties': local_properties}\n try:\n content = get_content(*content_args, **content_kwargs)\n except BadStatusLine:\n content = get_content(*content_args, **content_kwargs)\n except IOError as exc:\n if exc.errno != errno.EPIPE:\n raise exc\n content = get_content(*content_args, **content_kwargs)\n\n object_list = []\n for obj in content:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n properties['object'] = obj.obj\n object_list.append(properties)\n log.trace('Retrieved %s objects', len(object_list))\n return object_list\n",
"def get_root_folder(service_instance):\n '''\n Returns the root folder of a vCenter.\n\n service_instance\n The Service Instance Object for which to obtain the root folder.\n '''\n try:\n log.trace('Retrieving root folder')\n return service_instance.RetrieveContent().rootFolder\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary with the command result (``cmd.run_all`` output),
             or False when the esxcli binary is not installed
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting through a vCenter: -h targets the specific ESXi host.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    # The assembled command line embeds the password; quiet logging keeps it
    # out of the minion logs.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Supports the ``userpass`` (username/password) and ``sspi`` (Kerberos)
    login mechanisms. Connections that fail with an SSL certificate
    verification error are retried with certificate verification disabled,
    to accommodate the self-signed certificates ESXi hosts commonly use.

    Raises CommandExecutionError for missing/invalid credential parameters
    and VMwareConnectionError when the connection cannot be established.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Old pyVmomi versions do not accept the b64token/mechanism kwargs.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # First fallback: retry with an unverified SSL context.
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Second fallback: TLSv1 context with verification disabled.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is torn down when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of customizing a clone

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Resolve the spec by name via the vCenter's customization spec manager.
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    # Recursive (True) container view over the whole inventory tree.
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    # Return the first object whose name matches, or None when absent.
    return next((entry for entry in container.view if entry.name == obj_name), None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    A cached service instance is reused when possible; sessions that are no
    longer authenticated are transparently reconnected.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # GetSi() returns pyVim's process-wide cached service instance, if any.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and build a fresh connection.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    # Reuse the host and session cookie of the existing (private) stub so the
    # new stub rides on the already-authenticated session.
    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional; used only for logging.
    '''
    if not name:
        # NOTE(review): unreachable with the default '<unnamed>'; only taken
        # when an explicitly falsy name (e.g. None) is passed in.
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    si = vim.ServiceInstance('ServiceInstance')
    # Share the managed object's SOAP stub so the new ServiceInstance uses
    # the same connection/session.
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises VMwareApiError/VMwareRuntimeError when the API call fails.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises VMwareApiError for an unexpected apiType or a failed API call,
    and VMwareRuntimeError on runtime faults.
    '''
    try:
        # 'VirtualCenter' identifies a vCenter; 'HostAgent' a standalone ESXi.
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Returns the ``content.about`` object of the endpoint.
    Raises VMwareApiError/VMwareRuntimeError when the API call fails.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None if no DVS with that name exists
    '''
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        # NOTE(review): list_dvs() already enumerated the switches by name;
        # the container view below walks the inventory a second time just to
        # obtain the actual object reference.
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        for item in container.view:
            if item.name == dvs_name:
                return item
    return None
def _get_pnics(host_reference):
    '''
    Helper function that returns a list of PhysicalNics and their information.

    host_reference
        Host managed object whose ``config.network.pnic`` list is returned.
    '''
    return host_reference.config.network.pnic
def _get_vnics(host_reference):
    '''
    Helper function that returns a list of VirtualNics and their information.

    host_reference
        Host managed object whose ``config.network.vnic`` list is returned.
    '''
    return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
    '''
    Helper function that returns the host's virtual NIC manager
    (``configManager.virtualNicManager``) -- a single manager object,
    not a list as the original docstring implied.
    '''
    return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None when no portgroup matches
    '''
    # NOTE(review): this body is byte-for-byte identical to
    # _get_dvs_portgroup and does not restrict the search to uplink
    # portgroups despite the name -- presumably it should filter on the DVS's
    # uplink portgroup configuration; confirm against callers.
    for portgroup in dvs.portgroup:
        if portgroup.name == portgroup_name:
            return portgroup
    return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    Returns the base64-encoded token. Raises ImportError when the gssapi
    library is unavailable and CommandExecutionError when no token could be
    obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # First client step of the GSSAPI handshake; no server response is
        # ever fed back in, so in_token stays None.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): since in_token is always None here, this raise fires
        # on the first iteration that yields no token, making the loop
        # effectively single-step.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance
    is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    Returns an empty dict when the service instance is not a HostAgent
    (i.e. when connected to a vCenter rather than a single ESXi host).

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        # A HostAgent manages exactly one host; collect it via a container view
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is reported in bytes; grains expect MiB
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Virtual NICs provide the IP and MAC address grains per device
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host + '.' + domain, omitting the dot when domain is empty
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NICs contribute additional MAC address entries
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the reference to the container view when done
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the full content (inventory) of a Service Instance.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Return the inventory root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    try:
        log.trace('Retrieving root folder')
        content = service_instance.RetrieveContent()
        return content.rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more
        filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view; only needed when we created a temporary
    # container view above
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose ``property_name`` value
    (or stringified moref id) equals ``property_value``; None when nothing
    matches.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value to match against.

    property_name
        An object property used to return the specified object reference
        results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.
    '''
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # Also match on the moref id, stripped of surrounding quotes
        moid = six.text_type(candidate.get('object', '')).strip('\'"')
        if property_value in (candidate[property_name], moid):
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Return a list of dicts, one per retrieved managed object, mapping each
    requested property name to its value plus an ``object`` key holding the
    managed object reference itself.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more
        filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''
    kwargs = {'property_list': property_list,
              'container_ref': container_ref,
              'traversal_spec': traversal_spec,
              'local_properties': local_properties}
    # Retry once on a stale HTTP connection (BadStatusLine or EPIPE);
    # any other IOError is propagated unchanged.
    try:
        content = get_content(service_instance, object_type, **kwargs)
    except BadStatusLine:
        content = get_content(service_instance, object_type, **kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(service_instance, object_type, **kwargs)
    object_list = []
    for obj_content in content:
        entry = {prop.name: prop.val for prop in obj_content.propSet}
        entry['object'] = obj_content.obj
        object_list.append(entry)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Retrieve specific local properties of a managed object.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises VMwareApiError when the properties could not be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    mo_type = type(mo_ref)
    log.trace('Retrieving name of %s', mo_type.__name__)
    # First fetch the object's name so error messages can identify it;
    # not every managed object exposes a 'name' property.
    try:
        name_items = get_mors_with_properties(service_instance,
                                              mo_type,
                                              container_ref=mo_ref,
                                              property_list=['name'],
                                              local_properties=True)
        mo_name = name_items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, mo_type.__name__, mo_name)
    prop_items = get_mors_with_properties(service_instance,
                                          mo_type,
                                          container_ref=mo_ref,
                                          property_list=properties,
                                          local_properties=True)
    if not prop_items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return prop_items[0]
def get_managed_object_name(mo_ref):
    '''
    Return the ``name`` property of a managed object, or None when the
    object has no name.

    mo_ref
        The managed object reference.
    '''
    properties = get_properties_of_managed_object(mo_ref, ['name'])
    return properties.get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device object for the given
    adapter type name.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or 'e1000e'.

    Raises ValueError for any other adapter type name.
    '''
    factories = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type not in factories:
        raise ValueError('An unknown network adapter object type name.')
    return factories[adapter_type]()
def get_network_adapter_object_type(adapter_object):
    '''
    Return the type name of a virtual network adapter device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ValueError for unrecognized adapter objects.
    '''
    # Order preserves the original precedence: the more specific adapter
    # classes are matched before VirtualVmxnet / VirtualE1000.
    checks = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for adapter_class, type_name in checks:
        if isinstance(adapter_object, adapter_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Return the distributed virtual switches (DVSs) of a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # DVSs live under the datacenter's network folder:
    # Datacenter -> networkFolder -> childEntity
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    entries = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualSwitch,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    return [entry['object'] for entry in entries
            if get_all_dvss or (dvs_names and entry['name'] in dvs_names)]
def get_network_folder(dc_ref):
    '''
    Return the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises VMwareObjectRetrievalError when the folder cannot be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    network_folder = entries[0]['object']
    return network_folder
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits
    for the creation task to complete.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the
        name is built. NOTE: a passed-in spec without a configSpec is
        mutated in place.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    # The DVS is created inside the datacenter's network folder
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Reconfigures a distributed virtual switch with the given config spec
    and waits for the reconfiguration task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether network resource management (NIOC) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises ArgumentValueError when the parent is neither a datacenter nor
    a distributed virtual switch.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    if isinstance(parent_ref, vim.Datacenter):
        # Traverse Datacenter -> networkFolder -> childEntity
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch; use its portgroup list
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs)

    dvs_ref
        The dvs reference

    Raises VMwareObjectRetrievalError when no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup carries the 'SYSTEM/DVS.UPLINKPG' tag
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec) to apply

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log-message typo: 'portgrouo' -> 'portgroup'
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy
    task to complete.

    portgroup_ref
        The portgroup reference

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.

    Raises ArgumentValueError when the parent is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks live under the datacenter's network folder:
    # Datacenter -> networkFolder -> childEntity
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of the ``name`` values of objects of a given type
    from a service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Regardless of which properties are requested, only each object's
    # 'name' value is returned (original behavior preserved).
    return [item['name'] for item in
            get_mors_with_properties(service_instance, vim_object, properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises VMwareApiError on permission/API faults, VMwareRuntimeError on
    runtime faults, and VMwareObjectRetrievalError when the manager is
    not available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.

    Returns the added license object. Raises VMwareApiError on
    permission/API faults and VMwareRuntimeError on runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label the vSphere client displays
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ArgumentValueError when entity_name is missing, VMwareApiError
    on permission/API faults, VMwareRuntimeError on runtime faults, and
    VMwareObjectRetrievalError on inconsistent vCenter assignments.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # vCenter case: it is identified by its instance UUID, and the
        # returned assignment's display name is verified against entity_name
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid) query is expected to return exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        # Guard against getting license info for a different vCenter
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Returns the assigned license object. Raises VMwareApiError on
    permission/API faults and VMwareRuntimeError on runtime faults.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter: identified by its instance UUID
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Consistency fix: log the fault before re-raising, as every
            # other fault handler in this module does.
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Lists all datacenters visible through the given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    datacenter_type = vim.Datacenter
    return list_objects(service_instance, datacenter_type)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter, optionally filtered by name.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    # Only the 'name' property is fetched; the name filter is applied
    # client-side.
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items
def get_datacenter(service_instance, datacenter_name):
    '''
    Looks up a single datacenter by name and returns its vim.Datacenter
    managed object reference.

    service_instance
        The Service Instance Object used for the lookup.

    datacenter_name
        Name of the datacenter to retrieve.

    Raises a VMwareObjectRetrievalError when no datacenter with the
    given name exists.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if matches:
        return matches[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder and returns the
    new vim.Datacenter reference.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        return root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_cluster(dc_ref, cluster):
    '''
    Retrieves a cluster (vim.ClusterComputeResource) by name from a
    datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved

    Raises a VMwareObjectRetrievalError when the cluster cannot be found
    in the datacenter.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Traverse datacenter -> hostFolder -> childEntity to reach the
    # clusters contained in this datacenter.
    child_traversal = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_traversal])
    matches = [entry['object'] for entry in
               get_mors_with_properties(si,
                                        vim.ClusterComputeResource,
                                        container_ref=dc_ref,
                                        property_list=['name'],
                                        traversal_spec=traversal_spec)
               if entry['name'] == cluster]
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    datacenter_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, datacenter_name)
    host_folder = dc_ref.hostFolder
    try:
        host_folder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Reconfigures an existing cluster and waits for the reconfiguration
    task to complete.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', name)
    try:
        task = cluster_ref.ReconfigureComputeResource_Task(
            cluster_spec, modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Lists all clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    cluster_type = vim.ClusterComputeResource
    return list_objects(service_instance, cluster_type)
def list_datastore_clusters(service_instance):
    '''
    Lists all datastore clusters (storage pods) associated with a given
    service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    pod_type = vim.StoragePod
    return list_objects(service_instance, pod_type)
def list_datastores(service_instance):
    '''
    Lists all datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastore_type = vim.Datastore
    return list_objects(service_instance, datastore_type)
def list_datastores_full(service_instance):
    '''
    Returns a mapping of datastore name to its basic information
    (name, type, url, capacity, free, used, usage, hosts).

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary describing a single datastore:
    name, type, url, capacity, free, used, usage, hosts.

    Capacity/free/used are expressed in MiB; usage is the used
    percentage of the capacity.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises a VMwareObjectRetrievalError when the datastore does not
    exist.
    '''
    ds_ref = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not ds_ref:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    summary = ds_ref.summary
    info = {}
    info['name'] = str(summary.name).replace("'", "")
    info['type'] = str(summary.type).replace("'", "")
    info['url'] = str(summary.url).replace("'", "")
    info['capacity'] = summary.capacity / 1024 / 1024
    info['free'] = summary.freeSpace / 1024 / 1024
    info['used'] = info['capacity'] - info['free']
    info['usage'] = (float(info['used']) / float(info['capacity'])) * 100
    info['hosts'] = []
    for mount in ds_ref.host:
        # The mount key looks like '<type>:<moid>'; extract the moid part
        moid = str(mount.key).replace("'", "").split(":", 1)[1]
        host_ref = get_mor_by_moid(service_instance, vim.HostSystem, moid)
        info['hosts'].append(host_ref.name)
    return info
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Returns the managed object reference of the given type whose name
    matches ``obj_name``, or None when no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Linear scan of the container view; first name match wins
    return next((obj for obj in container.view if obj.name == obj_name),
                None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Returns the managed object reference of the given type whose managed
    object id matches ``obj_moid``, or None when no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Linear scan of the container view; first moid match wins
    return next((obj for obj in container.view if obj._moId == obj_moid),
                None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object,
                                       datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(
                datastorePath='[{}] {}'.format(datobj.name, directory),
                searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            # Call the module-level helper directly instead of through the
            # salt.utils.vmware package path, consistent with the rest of
            # this module (e.g. update_cluster).
            files.append(wait_for_task(task, directory,
                                       'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore without the requested directory/files is not an
            # error; just skip it.
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all datastores visible from the
        reference. Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # Build a new list instead of extending the caller's list in
            # place (avoid a surprising side effect on the argument)
            datastore_names = datastore_names + disk_datastores
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Build a traversal spec appropriate for the type of the reference;
    # the default 'Traverse All' spec doesn't always reach the datastores
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    current_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", current_name,
              new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns an ESXi host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used for the retrieval.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host, used for logging/errors. Optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return entries[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns the partition information (vim.HostDiskPartitionInfo) for a
    device path.

    storage_system
        The host's vim.HostStorageSystem.

    device_path
        Path of the disk device to inspect.
    '''
    try:
        infos = storage_system.RetrieveDiskPartitionInfo(
            devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', infos[0])
    return infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The host's vim.HostStorageSystem used to compute the partition info.

    device_path
        Path of the disk device.

    partition_info
        The disk's current vim.HostDiskPartitionInfo.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use %-style lazy formatting: the previous '{0}' placeholder is never
    # expanded by the logging module and caused the trace message to fail
    # to render.
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id and returns the new
    vim.Datastore reference.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved if not provided.
        Default is None.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    # Compute a partition spec that adds a new vmfs partition taking up
    # the remaining free space of the disk
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    # Build the datastore creation spec around the new vmfs partition
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns an ESXi host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return entries[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes (unmounts) a datastore via the datastore system of one of
    the hosts it is attached to.

    (The previous docstring, "Creates a VMFS datastore from a disk_id",
    was a copy-paste from create_vmfs_datastore and was wrong.)

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Removal must go through a host's datastore system; any attached
    # host will do, so use the first one.
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False. Note: when a cluster is specified, the
        cluster-membership filter still applies even with this flag set.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            # Hosts that are not directly parented by a cluster (e.g.
            # standalone hosts) are excluded when a cluster filter is set
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises a VMwareObjectRetrievalError when the host's storage device
    info, multipath info or luns cannot be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        # Each multipath lun entry carries the lun key (l.lun) and its paths
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns all vim.ScsiLun objects on an ESXi host, or an empty list
    when the host reports none.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if not scsi_luns:
        log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
        return []
    log.trace('Retrieved scsi luns in host \'%s\': %s',
              hostname, [l.canonicalName for l in scsi_luns])
    return scsi_luns
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        si, host_ref, storage_system, hostname)
    lun_by_key = {lun.key: lun
                  for lun in get_all_luns(host_ref, storage_system, hostname)}
    return {scsi_addr: lun_by_key[lun_key]
            for scsi_addr, lun_key in six.iteritems(lun_key_by_scsi_addr)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Without any filter (and without get_all_disks) there is nothing
        # to return
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    # A lun matches when any of the enabled filters accepts it
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk (vim.HostDiskPartitionInfo).

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None.

    Raises a VMwareObjectRetrievalError when no devices are found on the
    host or the requested disk is not among them.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the disk by its canonical name among the host's scsi luns
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk attached to an ESXi host.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None; retrieved from the host if not
        provided.

    storage_system
        The ESXi host's storage system. Default is None; retrieved from the
        host if not provided.

    Raises a VMwareObjectRetrievalError if the host's devices cannot be
    retrieved or if no disk with the given canonical name is found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # Traverse from the host to its storage system to fetch the SCSI LUNs.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Keep only actual SCSI disks matching the requested canonical name.
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disk
    groups in a ESXi host, filtered by the canonical names of their cache
    disks.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.

    Raises a VMwareObjectRetrievalError if the host has no vSAN host config
    or no vSAN storage info.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # No filter given and not asking for all groups: nothing to match.
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # Each mapping's 'ssd' attribute is its (single) cache disk.
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that a disk group is composed of exactly the expected cache
    disk and capacity disks.

    Raises an ArgumentValueError if either the cache disk or the capacity
    disk set differs from what was expected; returns True otherwise.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # Compare order-insensitively: the API does not guarantee disk ordering.
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids,
                      expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the host cache configuration info object if the host cache is
    configured on the specified host, otherwise returns None.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Traverse from the host to its cache configuration manager.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first datastore's cache config is returned (see TODO).
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host. Returns True on
    success.

    host_ref
        The vim.HostSystem object representing the host whose cache is
        being configured.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache
        will be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # The configuration is applied asynchronously; block until done.
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    # Thin wrapper around the generic object lister for vim.HostSystem.
    requested_type = vim.HostSystem
    return list_objects(service_instance, requested_type)
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    # Thin wrapper around the generic object lister for vim.ResourcePool.
    requested_type = vim.ResourcePool
    return list_objects(service_instance, requested_type)
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    # Thin wrapper around the generic object lister for vim.Network.
    requested_type = vim.Network
    return list_objects(service_instance, requested_type)
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    # Thin wrapper around the generic object lister for vim.VirtualMachine.
    requested_type = vim.VirtualMachine
    return list_objects(service_instance, requested_type)
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    # Thin wrapper around the generic object lister for vim.Folder.
    requested_type = vim.Folder
    return list_objects(service_instance, requested_type)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    # Thin wrapper around the generic object lister for DVS objects.
    requested_type = vim.DistributedVirtualSwitch
    return list_objects(service_instance, requested_type)
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    # Thin wrapper around the generic object lister for vim.VirtualApp.
    requested_type = vim.VirtualApp
    return list_objects(service_instance, requested_type)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    # Thin wrapper around the generic object lister for DVS portgroups.
    requested_type = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, requested_type)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a vCenter/ESXi task to complete and returns its result.

    Polls the task roughly once per second; vim/vmodl faults raised while
    querying the task, or carried by a failed task, are translated into the
    corresponding salt VMware exceptions.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds between "waiting" log messages (polling itself
        happens about once per second). Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Initial read of the task state; accessing .info hits the server and
    # may itself raise permission/runtime faults.
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Log only every sleep_seconds iterations to avoid flooding.
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole second since start_time so the counter
        # approximates elapsed seconds.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise its stored fault so the
        # handlers below can translate it into a salt exception.
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                # Append the first localized fault message for context.
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name. Used as the search container when ``parent_ref``
        is not given.

    vm_properties
        List of vm properties. Defaults to a standard set covering hardware,
        storage, guest and runtime info.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError if no VM matches the name, and
    VMwareMultipleObjectsError if more than one does.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    # Filter client-side: the property collector returned all VMs in scope.
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the'
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary (may contain a 'folder' key)

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises VMwareObjectRetrievalError when no folder can be determined from
    the given arguments, and VMwareMultipleObjectsError when the placement
    folder name is ambiguous.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # When cloning, place the new VM in the same folder as the base VM.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif placement and 'folder' in placement:
        # Guard against placement being None before testing membership.
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this path fell through to the return statement and
        # raised an UnboundLocalError; fail with a meaningful error instead.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unable to retrieve a folder: no base VM, placement folder or '
            'datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies

    Raises VMwareObjectRetrievalError if the requested host/resource pool/
    cluster cannot be resolved, and VMwareMultipleObjectsError if the
    resource pool name is ambiguous.
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    # Guard each membership test against placement being None (its default).
    if placement and 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Hosts inside a cluster do not expose 'resourcePool' directly;
            # traverse to the parent cluster's resource pool instead.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif placement and 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: the message previously formatted placement['host'],
            # which does not exist in this branch and raised a KeyError
            # while reporting the error.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif placement and 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a dict with an
    integer size and the unit 'KB'.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    # vCenter needs an integer (long) value; dispatch on the unit.
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    factor = multipliers.get(unit.lower())
    if factor is None:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': int(size * factor), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine and waits for the operation to complete.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine; either 'on' or 'off'.

    Raises ArgumentValueError for an unsupported action, VMwareApiError /
    VMwareRuntimeError for API faults and VMwarePowerOnError if a required
    file is missing during the power operation.
    '''
    # Select the vim call up front so the (identical) fault handling below
    # is not duplicated once per action branch.
    if action == 'on':
        power_method = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_method = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_method()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will ne placed (optional)

    return
        Virtual Machine managed object reference
    '''
    try:
        # Only pass the host when a valid vim.HostSystem was supplied;
        # otherwise vCenter picks the host within the resource pool.
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Creation can be slow; poll every 10s and log at info level.
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)

    Raises VMwareVmRegisterError if the vmx file cannot be found during
    registration.
    '''
    try:
        # The VM is registered under the datacenter's default vm folder.
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object and
    waits for the reconfiguration task to complete.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update

    Returns the result of the ReconfigVM task.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine, deleting it from the inventory (and its
    files from disk, per the vSphere Destroy_Task semantics).

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destruction is asynchronous; block until the task finishes.
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory. Unlike delete_vm,
    this does not destroy the VM's files on disk.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # The previous message said 'Destroying' (copy-pasted from delete_vm);
    # UnregisterVM only removes the VM from the inventory.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, consistent with the other API wrappers.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
wait_for_task
|
python
|
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
'''
Waits for a task to be completed.
task
The task to wait for.
instance_name
The name of the ESXi host, vCenter Server, or Virtual Machine that
the task is being run on.
task_type
The type of task being performed. Useful information for debugging purposes.
sleep_seconds
The number of seconds to wait before querying the task again.
Defaults to ``1`` second.
log_level
The level at which to log task information. Default is ``debug``,
but ``info`` is also supported.
'''
time_counter = 0
start_time = time.time()
log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
try:
task_info = task.info
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.FileNotFound as exc:
log.exception(exc)
raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
while task_info.state == 'running' or task_info.state == 'queued':
if time_counter % sleep_seconds == 0:
msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
instance_name, task_type, time_counter)
if log_level == 'info':
log.info(msg)
else:
log.debug(msg)
time.sleep(1.0 - ((time.time() - start_time) % 1.0))
time_counter += 1
try:
task_info = task.info
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.FileNotFound as exc:
log.exception(exc)
raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
if task_info.state == 'success':
msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
instance_name, task_type, time_counter)
if log_level == 'info':
log.info(msg)
else:
log.debug(msg)
# task is in a successful state
return task_info.result
else:
# task is in an error state
try:
raise task_info.error
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.FileNotFound as exc:
log.exception(exc)
raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.fault.SystemError as exc:
log.exception(exc)
raise salt.exceptions.VMwareSystemError(exc.msg)
except vmodl.fault.InvalidArgument as exc:
log.exception(exc)
exc_message = exc.msg
if exc.faultMessage:
exc_message = '{0} ({1})'.format(exc_message,
exc.faultMessage[0].message)
raise salt.exceptions.VMwareApiError(exc_message)
|
Waits for a task to be completed.
task
The task to wait for.
instance_name
The name of the ESXi host, vCenter Server, or Virtual Machine that
the task is being run on.
task_type
The type of task being performed. Useful information for debugging purposes.
sleep_seconds
The number of seconds to wait before querying the task again.
Defaults to ``1`` second.
log_level
The level at which to log task information. Default is ``debug``,
but ``info`` is also supported.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L3170-L3270
| null |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load this utility module when pyVmomi is importable.
    '''
    # Guard clause: bail out with the standard (False, reason) tuple when
    # the pyVmomi import at the top of the file failed.
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param protocol: Connection protocol; defaults to ``https``
    :param port: TCP port; defaults to ``443``
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary as produced by ``cmd.run_all`` (keys include
             ``retcode``, ``stdout``, ``stderr``), or ``False`` when the
             ``esxcli`` binary is not on the PATH.
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting through vCenter; -h selects the target ESXi host.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    # NOTE(review): the password is interpolated into the command line (visible
    # in the process list) and arguments are not shell-escaped beyond literal
    # single quotes -- confirm that callers only pass trusted input.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Supports the ``userpass`` and ``sspi`` (Kerberos) login mechanisms and
    falls back to unverified SSL contexts when certificate verification
    fails. Registers a ``Disconnect`` atexit handler for the new connection.
    '''
    log.trace('Retrieving new service instance')
    token = None
    # Validate the mechanism-specific mandatory parameters up front.
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                # Kerberos token is passed to SmartConnect as b64token below.
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Older pyVmomi versions do not accept b64token/mechanism kwargs.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First fallback: retry with an unverified SSL context when the
            # failure was a certificate verification error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            # Second fallback: force TLSv1 with CERT_NONE verification.
            if 'certificate verify failed' in six.text_type(exc):
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the connection is torn down when the interpreter exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Look the spec up by name via the service instance's spec manager; use a
    # distinct local name instead of clobbering the parameter.
    spec_manager = si.content.customizationSpecManager
    customization_spec = spec_manager.GetCustomizationSpec(name=customization_spec_name)
    return customization_spec
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get a reference to the first object of the specified type and name, or
    None when no match exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    content = get_inventory(si)
    # Recursive container view over the whole inventory tree for this type.
    view = content.viewManager.CreateContainerView(content.rootFolder,
                                                   [obj_type], True)
    # Stop at the first entity whose name matches; None when nothing does.
    return next((entity for entity in view.view if entity.name == obj_name),
                None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    Reuses pyVim's cached connection when it targets the same host:port and
    this is not a proxy minion; otherwise reconnects. A stale (no longer
    authenticated) session is transparently re-established.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # GetSi() returns pyVim's process-wide cached service instance (if any).
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Build a new SOAP stub pointing at a different endpoint path, reusing the
    session of an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    ssl_context = None
    if sys.version_info[:3] > (2, 7, 8):
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE

    old_stub = service_instance._stub
    host_name = old_stub.host.split(':')[0]
    # Propagate the authenticated session cookie so the new stub rides the
    # same vCenter session.
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = \
        old_stub.cookie.split('"')[1]
    stub = SoapStubAdapter(host=host_name,
                           ns=ns,
                           path=path,
                           version=version,
                           poolSize=0,
                           sslContext=ssl_context)
    stub.cookie = old_stub.cookie
    return stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only for log messages. This field is
        optional.

        NOTE(review): with the default '<unnamed>' (truthy) the fallback to
        ``mo_ref.name`` below never triggers; it only runs when a caller
        explicitly passes a falsy name -- confirm this is intended.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a ServiceInstance that shares the managed object's SOAP stub
    # (i.e. the same authenticated connection).
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises salt ``VMwareApiError``/``VMwareRuntimeError`` so callers do not
    need to handle pyVmomi fault types directly.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    # Most-specific fault first; VimFault is the generic API fault base.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises ``VMwareApiError`` for API faults or when the reported apiType is
    neither 'VirtualCenter' nor 'HostAgent'.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' => vCenter Server; 'HostAgent' => standalone ESXi host.
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Returns the ``AboutInfo`` object (``content.about``) of the endpoint.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None if no DVS with that name exists
    '''
    # NOTE(review): list_dvs() (defined elsewhere in this module) already
    # enumerates DVS names, then the container view below scans the inventory
    # a second time -- this double pass looks redundant; confirm before
    # simplifying.
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        for item in container.view:
            if item.name == dvs_name:
                return item
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    Returns a base64-encoded token, or raises ``CommandExecutionError``.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            # First produced token is returned immediately, base64-encoded.
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): in_token is never reassigned in this loop, so this
        # branch always raises on the first iteration that produces no
        # out_token -- confirm whether a server-response step was intended.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    Returns an empty dict when the endpoint is not an ESXi host (HostAgent)
    or when no HostSystem is visible.

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only standalone ESXi hosts (apiType 'HostAgent') expose the hardware
    # data harvested below.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # All grains are read from the first (only) HostSystem in view.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grain is reported in MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']

            # Network interface grains, keyed by device name (vmk*, vmnic*).
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac

            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)

            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac

            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the local reference to the container view.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (ServiceContent) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises salt ``VMwareApiError``/``VMwareRuntimeError`` on pyVmomi faults.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view we created ourselves above (not one supplied
    # by the caller)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference whose property (or stringified
    moid) matches the given value, or None.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value the property (or the object id) must equal.

    property_name
        The object property compared against ``property_value``. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for entry in candidates:
        # Stringified moid with surrounding quotes stripped, e.g. vim.HostSystem:host-42
        moid = six.text_type(entry.get('object', '')).strip('\'"')
        if property_value in (entry[property_name], moid):
            return entry['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list of dicts, one per retrieved managed object, mapping each
    requested property name to its value plus an 'object' key holding the
    managed object reference itself.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    def _retrieve():
        # One-shot content retrieval; wrapped so the except clauses below can
        # retry it once on known transient connection failures.
        return get_content(service_instance, object_type,
                           property_list=property_list,
                           container_ref=container_ref,
                           traversal_spec=traversal_spec,
                           local_properties=local_properties)

    try:
        content = _retrieve()
    except BadStatusLine:
        content = _retrieve()
    except IOError as exc:
        # Only a broken pipe is treated as transient; anything else bubbles up.
        if exc.errno != errno.EPIPE:
            raise exc
        content = _retrieve()

    results = []
    for entry in content:
        props = {prop.name: prop.val for prop in entry.propSet}
        props['object'] = entry.obj
        results.append(props)
    log.trace('Retrieved %s objects', len(results))
    return results
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way (local-properties retrieval, no traversal).

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises ``VMwareApiError`` when the properties could not be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First fetch just the name, used only for log/error messages below.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        # Some managed object types have no 'name' property.
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Return the ``name`` property of a managed object, or None when the
    property was not found.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a freshly-instantiated virtual network adapter device of the
    requested kind.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e'.

    Raises ValueError for any other value.
    '''
    # Dispatch table: adapter name -> pyVmomi device class.
    factories = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type not in factories:
        raise ValueError('An unknown network adapter object type name.')
    return factories[adapter_type]()
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name for a virtual device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ValueError when the object matches none of the known types.
    '''
    # NOTE: the isinstance checks are ordered most-specific first --
    # presumably VirtualVmxnet2/3 subclass VirtualVmxnet in pyVmomi, so
    # reordering would misclassify them; confirm before changing.
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet2):
        return 'vmxnet2'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet3):
        return 'vmxnet3'
    if isinstance(adapter_object, vim.vm.device.VirtualVmxnet):
        return 'vmxnet'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000e):
        return 'e1000e'
    if isinstance(adapter_object, vim.vm.device.VirtualE1000):
        return 'e1000'
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns the distributed virtual switches (DVSs) in a datacenter whose
    names are in ``dvs_names``, or all of them when ``get_all_dvss`` is set.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs.
    folder_children_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_children_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    entries = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualSwitch,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    switches = []
    for entry in entries:
        # Keep everything when get_all_dvss; otherwise filter by name.
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            switches.append(entry['object'])
    return switches
def get_network_folder(dc_ref):
    '''
    Return the network folder of a datacenter.

    dc_ref
        The datacenter reference whose network folder is retrieved.

    Raises ``VMwareObjectRetrievalError`` when the folder cannot be found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Single hop: datacenter -> networkFolder.
    spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a spec is built from ``dvs_name``.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    # Fill in a minimal spec when the caller didn't provide one; the name in
    # the config spec is always forced to dvs_name.
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the creation task finished.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Reconfigures a distributed virtual switch with the given config spec and
    waits for the reconfiguration task to finish.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.

    Raises VMwareApiError on API faults (including missing privileges)
    and VMwareRuntimeError on vmodl runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    # NoPermission is handled first so the missing privilege is surfaced
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfiguration task complete
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enables/disables network I/O control (NIOC) on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.

    Raises VMwareApiError on API faults (including missing privileges)
    and VMwareRuntimeError on vmodl runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        # Synchronous call; no task object is returned for this operation
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups) found under a
    parent object, which can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Descend datacenter -> networkFolder -> child entities
        traversal = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # Parent is a distributed virtual switch; use its 'portgroup' property
        traversal = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    si = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(si,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference

    Raises VMwareObjectRetrievalError if no uplink portgroup exists.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    si = get_service_instance_from_managed_object(dvs_ref)
    uplink_pgs = []
    for entry in get_mors_with_properties(si,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal):
        # The uplink portgroup carries the system tag 'SYSTEM/DVS.UPLINKPG'
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to finish.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)

    Raises VMwareApiError on API faults (including missing privileges)
    and VMwareRuntimeError on vmodl runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    # NoPermission is handled first so the missing privilege is surfaced
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the creation task complete
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Reconfigures a distributed virtual portgroup and waits for the
    reconfiguration task to finish.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)

    Raises VMwareApiError on API faults (including missing privileges)
    and VMwareRuntimeError on vmodl runtime faults.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log message typo: 'portgrouo' -> 'portgroup'
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfiguration task complete
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Destroys a distributed virtual portgroup and waits for the destroy task
    to finish.

    portgroup_ref
        The portgroup reference

    Raises VMwareApiError on API faults (including missing privileges)
    and VMwareRuntimeError on vmodl runtime faults.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    # NoPermission is handled first so the missing privilege is surfaced
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the destroy task complete
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns standard-switch networks found under a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    si = get_service_instance_from_managed_object(parent_ref)
    # Descend datacenter -> networkFolder -> child entities
    traversal = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    networks = []
    for entry in get_mors_with_properties(si,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information
        (e.g. vim.Datacenter).

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Comprehension replaces the former manual append loop; the docstring
    # above also now names the actual parameter (vim_object, not object_type)
    return [item['name'] for item in
            get_mors_with_properties(service_instance, vim_object,
                                     properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager of a vCenter/ESXi service instance.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises VMwareApiError on API faults (including missing privileges)
    and VMwareRuntimeError on vmodl runtime faults.
    '''
    log.debug('Retrieving license manager')
    try:
        # Simple property read; may still fault on permission problems
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager of a service instance.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises VMwareApiError / VMwareRuntimeError on API faults and
    VMwareObjectRetrievalError if the manager is not available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # The property can legitimately be unset (e.g. on standalone ESXi)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Raises VMwareApiError on API faults (including missing privileges)
    and VMwareRuntimeError on vmodl runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license to the license manager and returns it.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label shown by the vSphere client
    license_label = vim.KeyValue()
    license_label.key = 'VpxClientLicenseLabel'
    license_label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        added_license = license_manager.AddLicense(key, [license_label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return added_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity_ref is not provided,
    the entity is assumed to be the vCenter itself and the display name of the
    retrieved assignment is checked against entity_name.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ArgumentValueError when entity_name is missing, VMwareApiError /
    VMwareRuntimeError on API faults and VMwareObjectRetrievalError when the
    assignment does not match the expected vCenter.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if entity_type == 'uuid' and len(assignments) > 1:
        # Fixed log message typo: 'Unexpectectedly' -> 'Unexpectedly'
        log.trace('Unexpectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.

    Raises VMwareApiError on API faults (including missing privileges)
    and VMwareRuntimeError on vmodl runtime faults.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Consistency fix: log the fault before re-raising, like every
            # other fault handler in this module
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns datacenter managed objects in a vCenter, optionally filtered
    by name.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object matching the given name.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name

    Raises VMwareObjectRetrievalError if no datacenter with that name exists.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder and returns the new
    vim.Datacenter object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name

    Raises VMwareApiError on API faults (including missing privileges)
    and VMwareRuntimeError on vmodl runtime faults.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        # Synchronous call; returns the created datacenter directly
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster found in a datacenter by name.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved

    Raises VMwareObjectRetrievalError when the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    service_instance = get_service_instance_from_managed_object(
        dc_ref, name=dc_name)
    # Descend datacenter -> hostFolder -> child entities
    traversal = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    matches = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.ClusterComputeResource,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal):
        if entry['name'] == cluster:
            matches.append(entry['object'])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.

    Raises VMwareApiError on API faults (including missing privileges)
    and VMwareRuntimeError on vmodl runtime faults.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        # Synchronous call; the returned cluster object is not needed here
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Reconfigures a cluster with the given spec and waits for the task to
    finish.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.

    Raises VMwareApiError on API faults (including missing privileges)
    and VMwareRuntimeError on vmodl runtime faults.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster (storage pod) names associated with a
    given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastore names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a mapping of datastore name to its basic information:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity (MiB), free (MiB), used (MiB), usage (percent),
    hosts (list of host names attached to the datastore).

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against ZeroDivisionError: inaccessible datastores can report
    # zero capacity
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key renders as "'vim.HostSystem:host-123'"; extract the moid
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name, or None if
    no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
    finally:
        # Container views are server-side objects; destroy the view so it
        # doesn't leak on the vCenter/ESXi server
        container.DestroyView()
    return None
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id, or None if
    no such object exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
    finally:
        # Container views are server-side objects; destroy the view so it
        # doesn't leak on the vCenter/ESXi server
        container.DestroyView()
    return None
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Search path is expressed in datastore notation: "[ds] dir"
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Missing directory on one datastore is not fatal; simply skip it
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible. Supported
        types: vim.HostSystem, vim.ClusterComputeResource, vim.Datacenter,
        vim.StoragePod, or the root 'Datacenters' folder.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
    if backing_disk_ids and not isinstance(reference, vim.HostSystem):
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\' when backing disk filter '
            'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        # Translate backing disk ids into datastore names by inspecting the
        # host's mounted VMFS volumes, then merge those names into the filter
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        # Nothing matched the disk filter and no names were given: no results
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises VMwareApiError on API faults (including missing privileges)
    and VMwareRuntimeError on vmodl runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        # Synchronous call; no task object is returned
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used for the lookup.

    host_ref
        Reference to the vim.HostSystem whose storage system is retrieved.

    hostname
        Optional host name used for logging; resolved from host_ref when
        omitted.

    Raises VMwareObjectRetrievalError if the storage system is not found.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Follow the host's configManager.storageSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a single device path, of type
    vim.HostDiskPartitionInfo.

    storage_system
        The host's vim.HostStorageSystem used to query the disk.

    device_path
        Path of the device whose partition layout is retrieved.

    Raises VMwareApiError on API faults (including missing privileges)
    and VMwareRuntimeError on vmodl runtime faults.
    '''
    try:
        # A single-element list is passed, so exactly one info is expected back
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem used to compute the new layout.

    device_path
        Path of the device to be partitioned.

    partition_info
        The device's current vim.HostDiskPartitionInfo.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed: was log.trace('... = {0}', ...); logging uses %-style lazy
    # interpolation, so the '{0}' placeholder was never substituted
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id; returns the vim.Datastore
    reference of the newly created datastore.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDisk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem; retrieved if not provided.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    # Compute a partition spec that turns the disk's remaining free space
    # into a single vmfs partition
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem managed
    object).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises a VMwareObjectRetrievalError if the datastore system could not be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    # Only a minimal property is requested; the managed object reference is
    # what is returned to the caller
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore, using the datastore system of the first host
    attached to it.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises a VMwareApiError if the datastore has no attached hosts.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # The removal is issued through the datastore system of the first
    # attached host
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing the ESXi hosts in a
    vcenter, filtered by their names and/or datacenter and cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # The parent is needed below to test cluster membership; the
            # retrieval also verifies the cluster exists in the datacenter
            properties.append('parent')
    else:
        # No datacenter given - search from the inventory root folder
        start_point = get_root_folder(service_instance)
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for host in hosts:
        if cluster_name:
            # Keep only hosts whose direct parent is the requested cluster
            parent = host['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host['name'] in host_names:
            filtered_hosts.append(host['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.

        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises a VMwareObjectRetrievalError if the storage device info, the
    multipath info or the luns could not be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.ScsiLun objects on an ESXi host; returns an
    empty list if the host reports no scsi luns.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    Raises a VMwareObjectRetrievalError if the storage system or its device
    info could not be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Builds a map of all vim.ScsiLun objects on an ESXi host, keyed by their
    scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref,
                                                                name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # First map each scsi address to the key of its backing lun, then
    # resolve those keys to the actual lun objects
    scsi_addr_to_lun_key = _get_scsi_address_to_lun_key_map(
        service_instance, host_ref, storage_system, hostname)
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    scsi_addr_to_lun = {}
    for scsi_addr, lun_key in six.iteritems(scsi_addr_to_lun_key):
        scsi_addr_to_lun[scsi_addr] = lun_by_key[lun_key]
    return scsi_addr_to_lun
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses.
    A disk is returned if it matches either filter.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # No filter and not retrieving everything - nothing to do
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk, as a vim.HostDiskPartitionInfo object.

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.

    Raises a VMwareObjectRetrievalError if the host has no devices or the
    disk is not found.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Find the scsi disk matching the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by updating the disk with an empty
    partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.

    Raises a VMwareObjectRetrievalError if the host's devices or the disk
    could not be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Find the scsi disk matching the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disk
    groups in a ESXi host, filtered by the canonical names of their cache
    disks.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.

    Raises a VMwareObjectRetrievalError if the vsan host config or its
    storage info could not be retrieved.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # Nothing requested and not retrieving everything
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by the canonical name of its (single) ssd
    # cache disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that a disk group contains exactly the expected cache disk and
    capacity disks; raises an ArgumentValueError describing the mismatch
    otherwise. Returns True when the disk group matches.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # Capacity disks are compared as sorted lists so ordering doesn't matter
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the first host cache configuration entry (an element of the
    cache manager's ``cacheConfigurationInfo``) if the host cache is
    configured on the specified host, otherwise returns None.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Traverse from the host to its cache configuration manager and
        # retrieve its configuration info in one query
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host; returns True on
    success.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache
        will be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the configuration task completes (raises on task failure)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    vimtype = vim.HostSystem
    return list_objects(service_instance, vimtype)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; if True all resource pools in the container are returned

    return
        List of vim.ResourcePool managed object references

    Raises a VMwareObjectRetrievalError if no matching resource pool was
    found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search under the datacenter if given, otherwise under the root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Fixed: the message previously formatted 'selected_pools', which is
        # always empty here; report the requested names instead
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    vimtype = vim.ResourcePool
    return list_objects(service_instance, vimtype)
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    vimtype = vim.Network
    return list_objects(service_instance, vimtype)
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vimtype = vim.VirtualMachine
    return list_objects(service_instance, vimtype)
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    vimtype = vim.Folder
    return list_objects(service_instance, vimtype)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    vimtype = vim.DistributedVirtualSwitch
    return list_objects(service_instance, vimtype)
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vimtype = vim.VirtualApp
    return list_objects(service_instance, vimtype)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    vimtype = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, vimtype)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises a VMwareObjectRetrievalError if no VM with the given name is
    found, and a VMwareMultipleObjectsError if more than one matches.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set used by the vmware execution/state modules
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Fixed: a missing comma between the two string literals previously
        # produced the message "...found with thesame name..."
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises an ArgumentValueError when neither a base VM, a folder placement
    nor a datacenter is specified.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # When cloning, reuse the folder of the VM being cloned
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Fixed: this path previously fell through to the return statement
        # with 'folder_object' unbound, raising an UnboundLocalError
        raise salt.exceptions.ArgumentValueError(
            'Unable to retrieve a folder: no base VM, folder placement or '
            'datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object used to query the vCenter

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if placement is None:
        # Fixed: a None placement previously raised a TypeError on the
        # membership tests below; fall through to the schema error instead
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # 'resourcePool' is not a direct host property here; look it up
            # through the host's parent compute resource instead
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: this message previously formatted placement['host'],
            # which cannot exist in this branch and raised a KeyError
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a long integer.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    # Normalise once so callers may pass 'GB', 'gb', 'Gb', ...
    normalized_unit = unit.lower()
    if normalized_unit == 'gb':
        # vCenter needs long value
        kb_size = int(size * 1024 * 1024)
    elif normalized_unit == 'mb':
        kb_size = int(size * 1024)
    elif normalized_unit == 'kb':
        kb_size = int(size)
    else:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': kb_size, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine; either ``on`` or ``off``

    Returns the ``virtual_machine`` reference on success. Raises
    ArgumentValueError for an unsupported action, VMwareApiError /
    VMwareRuntimeError on API faults, and VMwarePowerOnError if the task
    fails because a file was not found.
    '''
    # Resolve the operation up front so an invalid action fails before any
    # API call; this also removes the previously duplicated exception
    # ladders for the 'on' and 'off' branches.
    if action == 'on':
        power_op, task_name = virtual_machine.PowerOn, 'power on'
    elif action == 'off':
        power_op, task_name = virtual_machine.PowerOff, 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_op()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will ne placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Pass the host to CreateVM_Task only when a real HostSystem was given.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)
    '''
    try:
        # RegisterVM_Task attaches an existing .vmx to the inventory without
        # creating any disks; the host argument is only passed when supplied.
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        # A missing/invalid vmx file surfaces as a file-not-found task error;
        # re-raise as a registration-specific exception for callers.
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Block until the reconfigure completes and hand back the task result.
    return wait_for_task(reconfig_task, vm_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Destroy_Task is asynchronous; wait until it actually finishes.
    wait_for_task(destroy_task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters a virtual machine from the inventory.

    Unlike ``delete_vm`` this does NOT destroy the machine's files on disk;
    it only removes the VM from the vCenter/ESXi inventory. (The previous
    docstring and log message said "Destroying", a copy-paste from
    ``delete_vm``.)

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        # UnregisterVM is synchronous -- no task to wait for.
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # log.exception added for consistency with the other VM helpers in
        # this module, which all log the fault before re-raising.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_vm_by_property
|
python
|
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.
    '''
    # Scope the search to the datacenter unless an explicit container is given.
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Fixed: the two fragments below previously lacked a separating comma,
        # so implicit string concatenation produced '...with thesame name...'.
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
|
Get virtual machine properties based on the traversal specs and properties list,
returns Virtual Machine object with properties.
service_instance
Service instance object to access vCenter
name
Name of the virtual machine.
datacenter
Datacenter name
vm_properties
List of vm properties.
traversal_spec
Traversal Spec object(s) for searching.
parent_ref
Container Reference object for searching under a given object.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L3273-L3326
|
[
"def get_datacenter(service_instance, datacenter_name):\n '''\n Returns a vim.Datacenter managed object.\n\n service_instance\n The Service Instance Object from which to obtain datacenter.\n\n datacenter_name\n The datacenter name\n '''\n items = get_datacenters(service_instance,\n datacenter_names=[datacenter_name])\n if not items:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Datacenter \\'{0}\\' was not found'.format(datacenter_name))\n return items[0]\n",
"def get_mors_with_properties(service_instance, object_type, property_list=None,\n container_ref=None, traversal_spec=None,\n local_properties=False):\n '''\n Returns a list containing properties and managed object references for the managed object.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n\n object_type\n The type of content for which to obtain managed object references.\n\n property_list\n An optional list of object properties used to return even more filtered managed object reference results.\n\n container_ref\n An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,\n ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory\n rootFolder.\n\n traversal_spec\n An optional TraversalSpec to be used instead of the standard\n ``Traverse All`` spec\n\n local_properties\n Flag specigying whether the properties to be retrieved are local to the\n container. If that is the case, the traversal spec needs to be None.\n '''\n # Get all the content\n content_args = [service_instance, object_type]\n content_kwargs = {'property_list': property_list,\n 'container_ref': container_ref,\n 'traversal_spec': traversal_spec,\n 'local_properties': local_properties}\n try:\n content = get_content(*content_args, **content_kwargs)\n except BadStatusLine:\n content = get_content(*content_args, **content_kwargs)\n except IOError as exc:\n if exc.errno != errno.EPIPE:\n raise exc\n content = get_content(*content_args, **content_kwargs)\n\n object_list = []\n for obj in content:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n properties['object'] = obj.obj\n object_list.append(properties)\n log.trace('Retrieved %s objects', len(object_list))\n return object_list\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    # Guard clause: bail out with the load-failure reason when pyVmomi is absent.
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Going through a vCenter: -h targets the specific ESXi host.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    # NOTE(review): the password is interpolated into the command line, so it
    # is visible in the process table while esxcli runs; output_loglevel
    # 'quiet' keeps it out of the logs. Arguments are not shell-escaped --
    # values containing single quotes would break or alter the command.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Fix: the TypeError handler previously read ``exc.message``, a Python 2
    only attribute that raises AttributeError on Python 3; it now uses
    ``six.text_type(exc)``.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Older pyVmomi (< 6.0.0.2016.6) does not accept the b64token /
        # mechanism keyword arguments; detect that and point at the cause.
        if 'unexpected keyword argument' in six.text_type(exc):
            log.error('Initial connect to the VMware endpoint failed with %s',
                      six.text_type(exc))
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)

        try:
            # First fallback: retry with an unverified SSL context when the
            # failure is a certificate verification error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            # Second fallback: a bare TLSv1 context with verification disabled.
            if 'certificate verify failed' in six.text_type(exc):
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is closed when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of customizing a clone

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Look the spec up by name via the customizationSpecManager; use a
    # distinct local so the parameter is not shadowed by the result.
    spec_item = si.content.customizationSpecManager.GetCustomizationSpec(name=customization_spec_name)
    return spec_item
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the matching managed object, or None if no object of that type
    has the given name.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it is
        # not leaked on the vCenter/ESXi side (previously it was never freed).
        container.Destroy()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Reuse pyVim's module-level cached connection when it points at the
    # same host:port and we are not running inside a proxy minion.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and authenticate from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    # Reuse the existing connection's authenticated session: the session id
    # is the quoted portion of the cookie header stored on the old stub, and
    # it is propagated via pyVmomi's request context so the new stub does not
    # need to log in again.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    # NOTE(review): with the '<unnamed>' default this branch only triggers
    # when a caller explicitly passes a falsy name; presumably the default
    # was meant to be None so mo_ref.name would be used -- confirm intent.
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a ServiceInstance on the same SOAP stub as the managed object so
    # it talks to the same vCenter/ESXi endpoint and session.
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' => vCenter; 'HostAgent' => directly-connected ESXi host.
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        # content.about carries product name, version, apiType, build, etc.
        return service_instance.content.about
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None if no DVS with that name exists
    '''
    # list_dvs (defined elsewhere in this module) supplies the known switch
    # names; the container view is only built when the name exists.
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        # NOTE(review): the container view is never destroyed, leaking a
        # server-side view object per call -- consider container.Destroy().
        container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        for item in container.view:
            if item.name == dvs_name:
                return item
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object
    '''
    # NOTE(review): this body is identical to _get_dvs_portgroup and does not
    # restrict the search to uplink portgroups -- any portgroup matching the
    # name is returned. Confirm whether filtering against the DVS's uplink
    # portgroups was intended.
    for portgroup in dvs.portgroup:
        if portgroup.name == portgroup_name:
            return portgroup
    return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    # Kerberos service name in principal/host@DOMAIN form.
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # A successful step yields the (base64-encoded) token to send.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        # NOTE(review): in_token is never reassigned inside this loop, so the
        # loop performs at most a single negotiation round: an empty out_token
        # with an unestablished context raises immediately below. Confirm
        # multi-round negotiation was not intended.
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Hardware grains only make sense for a direct ESXi (HostAgent)
    # connection; a vCenter aggregates many hosts, so nothing is collected.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the single ESXi host behind this HostAgent.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grains report MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Virtual NICs (vmk interfaces) provide the IP/MAC grains.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # Only join host and domain with a dot when a domain is set.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NICs only contribute MAC addresses.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the local reference to the container view when done.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the full inventory content of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    # RetrieveContent returns the ServiceContent object, i.e. the
    # entry point into the whole vSphere inventory.
    return service_instance.RetrieveContent()
def get_root_folder(service_instance):
    '''
    Returns the inventory root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises VMwareApiError on permission or API faults and
    VMwareRuntimeError on runtime faults.
    '''
    log.trace('Retrieving root folder')
    try:
        root_ref = service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return root_ref
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties used to return even more
        filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''
    # Start at the rootFolder if a container starting point was not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the
    # filter is the container_ref passed into the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        # When properties are local no traversal is needed, so the starting
        # object itself is collected (skip=False) and no selectSet is used
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the container view created above, if one was created here
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match on (or the object's moid string).

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Collect every managed object of the requested type along with the
    # single property we want to match against.
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # Also accept a match against the stringified moid of the object.
        mor_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or mor_id == property_value:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    # A BadStatusLine or an EPIPE IOError indicates a stale HTTP connection
    # to the vCenter; in both cases the call is retried exactly once.
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        content = get_content(*content_args, **content_kwargs)
    except IOError as io_err:
        if io_err.errno != errno.EPIPE:
            raise io_err
        content = get_content(*content_args, **content_kwargs)
    object_list = []
    for obj in content:
        # Flatten the propSet into a plain dict; keep the reference itself
        # under the 'object' key.
        entry = dict((prop.name, prop.val) for prop in obj.propSet)
        entry['object'] = obj.obj
        object_list.append(entry)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns the requested properties of a managed object, retrieved in a
    single property-collector call.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises VMwareApiError if no properties could be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First resolve the object's name so error messages are readable;
    # fall back to a placeholder when the 'name' property doesn't exist.
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    prop_entries = get_mors_with_properties(service_instance,
                                            type(mo_ref),
                                            container_ref=mo_ref,
                                            property_list=properties,
                                            local_properties=True)
    if not prop_entries:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return prop_entries[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the ``name`` property of a managed object, or None when the
    object has no such property.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Returns a new virtual network adapter device object for the given
    adapter type name.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or 'e1000e'.

    Raises ValueError for any other value.
    '''
    # Branches are evaluated lazily so an unknown type never touches vim.
    if adapter_type == 'e1000e':
        return vim.vm.device.VirtualE1000e()
    if adapter_type == 'e1000':
        return vim.vm.device.VirtualE1000()
    if adapter_type == 'vmxnet3':
        return vim.vm.device.VirtualVmxnet3()
    if adapter_type == 'vmxnet2':
        return vim.vm.device.VirtualVmxnet2()
    if adapter_type == 'vmxnet':
        return vim.vm.device.VirtualVmxnet()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the type name of a virtual network adapter device object.

    adapter_object
        The adapter device object from which to derive the type name.

    Raises ValueError when the object matches none of the known types.
    '''
    # NOTE: the ordering below is preserved from the original implementation;
    # the vmxnet2/vmxnet3 checks run before the generic vmxnet check since
    # those classes subclass VirtualVmxnet in the vSphere API.
    type_map = ((vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
                (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
                (vim.vm.device.VirtualVmxnet, 'vmxnet'),
                (vim.vm.device.VirtualE1000e, 'e1000e'),
                (vim.vm.device.VirtualE1000, 'e1000'))
    for adapter_cls, type_name in type_map:
        if isinstance(adapter_object, adapter_cls):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs.
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    entries = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualSwitch,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    return [entry['object'] for entry in entries
            if get_all_dvss or (dvs_names and entry['name'] in dvs_names)]
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises VMwareObjectRetrievalError when the folder cannot be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    folder_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    results = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=folder_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return results[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        # Ensure a config spec exists and carries the requested name.
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    network_folder = get_network_folder(dc_ref)
    try:
        create_task = network_folder.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(create_task, dvs_name, six.text_type(create_task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Reconfigures a distributed virtual switch with the given config spec and
    waits for the reconfigure task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        update_task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(update_task, dvs_name, six.text_type(update_task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enables or disables network I/O control (NIOC) on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises ArgumentValueError for an unsupported parent type.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Datacenter parent: hop through networkFolder -> childEntity.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # DVS parent: portgroups hang directly off the 'portgroup' property.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    entries = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualPortgroup,
                                       container_ref=parent_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    return [entry['object'] for entry in entries
            if get_all_portgroups or
            (portgroup_names and entry['name'] in portgroup_names)]
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference.

    Raises VMwareObjectRetrievalError when no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        # The uplink portgroup is marked with the SYSTEM/DVS.UPLINKPG tag.
        if entry['tag'] and any(t.key == 'SYSTEM/DVS.UPLINKPG'
                                for t in entry['tag']):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec).
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        create_task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(create_task, dvs_name, six.text_type(create_task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the reconfigure
    task to complete.

    portgroup_ref
        The portgroup reference.

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec) to apply.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        destroy_task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(destroy_task, pg_name, six.text_type(destroy_task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns standard switch networks under a datacenter.

    parent_ref
        The parent object reference. Must be a datacenter object.

    network_names
        The names of the standard switch networks to return. Default is None.

    get_all_networks
        Boolean indicating whether to return all networks in the parent.
        Default is False.

    Raises ArgumentValueError when parent_ref is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to reach networks.
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_spec])
    entries = get_mors_with_properties(service_instance,
                                       vim.Network,
                                       container_ref=parent_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    return [entry['object'] for entry in entries
            if get_all_networks or
            (network_names and entry['name'] in network_names)]
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of names of the objects of the given type.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The managed object type for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    return [entry['name'] for entry in
            get_mors_with_properties(service_instance, vim_object, properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager of a Service Instance.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        return service_instance.content.licenseManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager of a Service Instance.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises VMwareObjectRetrievalError when the manager is not available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    if not assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license to the instance.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label displayed by the vSphere
    # client for this license.
    desc_label = vim.KeyValue()
    desc_label.key = 'VpxClientLicenseLabel'
    desc_label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [desc_label])
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity_ref is not provided,
    the entity is assumed to be the vCenter itself; in that case entity_name
    is later checked against the assignment's display name.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging (mandatory).
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ArgumentValueError when no entity_name is passed, and
    VMwareObjectRetrievalError on unexpected assignment results.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # vCenter itself: identified by its instance UUID rather than a moid
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        # A concrete entity (e.g. cluster, host) is identified by its moid
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A uuid (vCenter) query is expected to return exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        # Guard against having queried a different vCenter than expected
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to assign.

    license_name
        The description of the license to assign.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        # NOTE(review): unlike the handlers elsewhere in this module, the two
        # handlers below re-raise without log.exception - confirm intentional.
        except vim.fault.VimFault as exc:
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    # Delegate to the generic name-listing helper.
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns datacenter managed objects in a vCenter.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    entries = get_mors_with_properties(service_instance,
                                       vim.Datacenter,
                                       property_list=['name'])
    return [entry['object'] for entry in entries
            if get_all_datacenters or
            (datacenter_names and entry['name'] in datacenter_names)]
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object by name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name.

    Raises VMwareObjectRetrievalError when the datacenter is not found.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder and returns its
    managed object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object.

    datacenter_name
        The datacenter name.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        datacenter = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return datacenter
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference.

    cluster
        The name of the cluster to be retrieved.

    Raises VMwareObjectRetrievalError when the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Traverse datacenter -> hostFolder -> childEntity to reach clusters.
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_spec])
    matches = [entry['object'] for entry in
               get_mors_with_properties(si,
                                        vim.ClusterComputeResource,
                                        container_ref=dc_ref,
                                        property_list=['name'],
                                        traversal_spec=traversal_spec)
               if entry['name'] == cluster]
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    datacenter_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, datacenter_name)
    try:
        # Clusters are created under the datacenter's host folder
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True applies the spec incrementally instead of replacing
        # the entire configuration
        reconfig_task = cluster_ref.ReconfigureComputeResource_Task(
            cluster_spec, modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfiguration task completes (raises on failure)
    wait_for_task(reconfig_task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    # Thin wrapper: delegates the inventory walk to the generic lister
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    # Datastore clusters are modeled as vim.StoragePod in the vSphere API
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # Thin wrapper: delegates the inventory walk to the generic lister
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.
    The list contains basic information about the datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # Map each datastore name to its detailed summary dictionary
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises a VMwareObjectRetrievalError if the datastore doesn't exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # The API reports sizes in bytes; expose them in MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against a zero-capacity datastore (e.g. inaccessible or not yet
    # mounted) which would otherwise raise ZeroDivisionError
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key renders as "vim.HostSystem:host-123"; keep only the moid
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # First matching object wins; None when no object has the given name
    return next((obj for obj in container.view if obj.name == obj_name), None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # First object with a matching managed object id; None when absent
    return next((obj for obj in container.view if obj._moId == obj_moid),
                None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # The search runs asynchronously; one task per datastore
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A missing directory on one datastore is not fatal; skip it and
            # continue searching the remaining datastores
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # Build a new list instead of extending the caller's list in
            # place; mutating the 'datastore_names' argument would leak an
            # unexpected side effect back to the caller
            datastore_names = datastore_names + disk_datastores
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Pick a traversal spec matching the reference type; the default traversal
    # spec doesn't reach the 'datastore' property of all of these objects
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    current_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", current_name,
              new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host; looked up from the reference when omitted.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    # Traverse from the host to its storage system managed object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return entries[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition informations for a device path, of type
    vim.HostDiskPartitionInfo
    '''
    try:
        infos = storage_system.RetrieveDiskPartitionInfo(
            devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A single device path was queried, so the answer is the first element
    log.trace('partition_info = %s', infos[0])
    return infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The host's storage system (vim.HostStorageSystem).

    device_path
        Path of the disk device.

    partition_info
        Existing partition layout of the disk (vim.HostDiskPartitionInfo).
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use a %s placeholder: the logger interpolates %-style lazy args, so the
    # previous '{0}' placeholder was never substituted into the message
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's storage system (vim.HostStorageSystem); retrieved from
        the host when not provided. Default is None.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    # Inspect the disk's current partitioning
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a spec that adds a vmfs partition in the free space at the
    # end of the disk
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its datastore system managed object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return entries[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore from the inventory.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    datastore_name = props['name']
    log.debug('Removing datastore \'%s\'', datastore_name)
    attached_hosts = props.get('host')
    if not attached_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(datastore_name))
    # Removal goes through the datastore system of a host the datastore is
    # mounted on; any attached host works, so use the first one
    hostname = get_managed_object_name(attached_hosts[0].key)
    host_ds_system = get_host_datastore_system(attached_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, datastore_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # 'parent' is needed to verify cluster membership; cluster
            # existence only makes sense if the datacenter has been specified
            properties.append('parent')
    else:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for host_props in hosts:
        if cluster_name:
            # Keep only hosts whose direct parent is the requested cluster
            parent = host_props['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host_props['name'] in host_names:
            filtered_hosts.append(host_props['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.

        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises VMwareObjectRetrievalError when the storage device info, the
    multipath info, or the luns can't be read from the host.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        # Resolve the storage system lazily from the host reference
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    luns = device_info.scsiLun
    if not luns:
        log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
        return []
    log.trace('Retrieved scsi luns in host \'%s\': %s',
              hostname, [lun.canonicalName for lun in luns])
    return luns
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(
        host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # scsi_address -> lun key
    lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        service_instance, host_ref, storage_system, hostname)
    # lun key -> lun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Combine the two maps: scsi_address -> lun object
    result = {}
    for scsi_addr, lun_key in six.iteritems(lun_key_by_scsi_addr):
        result[scsi_addr] = lun_by_key[lun_key]
    return result
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # No filter criteria supplied means nothing can match
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    # A lun matches when retrieving everything, or when its canonical name
    # was requested, or when its key maps back to a requested scsi address
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError when no devices are found on the host
    or when no disk matches the given canonical name.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Select the scsi disk with the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.

    Raises VMwareObjectRetrievalError when the host's devices can't be read
    or when no disk matches the given canonical name.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # Traverse from the host to its storage system to list its scsi luns
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Select the scsi disk with the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing the disk
    groups in an ESXi host, filtered by the canonical names of their cache
    disks.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks of the disk groups to
        be retrieved. The canonical name of the cache disk is enough to
        identify the disk group because a disk group is guaranteed to have
        one and only one cache disk. Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disk groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # Nothing to filter on; avoid querying the host at all.
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    # No disk groups configured at all is a valid state, not an error.
    if not vsan_disk_mappings:
        return []
    # Each mapping's ssd attribute is its (unique) cache disk; match on its
    # canonical name unless all groups were requested.
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Validates that a disk group's cache disk and capacity disks match the
    expected canonical names; raises ArgumentValueError on any mismatch and
    returns True on success.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # Compare order-insensitively: the host may report capacity disks in any
    # order.
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, otherwise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # No manager supplied: traverse from the host to its cache
        # configuration manager and read the config info in one query.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first datastore's cache configuration is returned (see
        # the TODO above about multi-datastore support).
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache
        will be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        # Look up the host's cache configuration manager if the caller did
        # not supply one.
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfiguration finished.
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Retrieves all ESXi hosts visible through the given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    host_type = vim.HostSystem
    return list_objects(service_instance, host_type)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        List of resource pool names to retrieve

    datacenter_name
        Name of the datacenter where the resource pool is available.
        Default is None, meaning the whole inventory is searched.

    get_all_resource_pools
        Boolean; when True all resource pools in the container are returned

    return
        List of vim.ResourcePool managed object references

    Raises salt.exceptions.VMwareObjectRetrievalError if no matching pool
    is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search under the datacenter when one is given, otherwise start from
    # the inventory root folder.
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Bug fix: the message previously formatted ``selected_pools`` which
        # is always empty here; report the names that were actually requested.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Retrieves all resource pools visible through the given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    pool_type = vim.ResourcePool
    return list_objects(service_instance, pool_type)
def list_networks(service_instance):
    '''
    Retrieves all networks visible through the given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    network_type = vim.Network
    return list_objects(service_instance, network_type)
def list_vms(service_instance):
    '''
    Retrieves all virtual machines visible through the given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vm_type = vim.VirtualMachine
    return list_objects(service_instance, vm_type)
def list_folders(service_instance):
    '''
    Retrieves all folders visible through the given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    folder_type = vim.Folder
    return list_objects(service_instance, folder_type)
def list_dvs(service_instance):
    '''
    Retrieves all distributed virtual switches visible through the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        switches.
    '''
    dvs_type = vim.DistributedVirtualSwitch
    return list_objects(service_instance, dvs_type)
def list_vapps(service_instance):
    '''
    Retrieves all vApps visible through the given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vapp_type = vim.VirtualApp
    return list_objects(service_instance, vapp_type)
def list_portgroups(service_instance):
    '''
    Retrieves all distributed virtual portgroups visible through the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        portgroups.
    '''
    portgroup_type = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, portgroup_type)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a vCenter/ESXi task to complete and returns its result, or
    translates its error into the matching salt VMware exception.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Reading task.info is a server round-trip and may itself fault.
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only emit a progress message every ``sleep_seconds`` iterations.
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary relative to start_time,
        # so each loop iteration corresponds to roughly one elapsed second.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault so it can be
        # caught and translated into the appropriate salt exception below
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first detailed fault message, if any, for context.
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns the vim.Folder object a virtual machine should be created in.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary; may contain a ``folder`` key. May be None.

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises salt.exceptions.VMwareObjectRetrievalError if no folder can be
    determined from the given arguments.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Cloning: place the new VM in the same folder as the base VM.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif placement and 'folder' in placement:
        # Guarding on ``placement`` avoids a TypeError when it is None.
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Bug fix: previously this fell through to an UnboundLocalError on
        # the return statement; raise a meaningful error instead.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unable to retrieve a folder: no base virtual machine, folder '
            'placement or datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied,
    we would like to use the strictest as possible.

    service_instance
        Service instance object used to query the vCenter

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info: cluster, host or resource pool
        name. Exactly one of those keys is expected.

    return
        Tuple of (resource pool object, placement object) where the
        placement object is the host or cluster, if any applies

    Raises salt.exceptions.VMwareObjectRetrievalError when the placement
    cannot be resolved.
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    # Guarding on ``placement`` avoids a TypeError when it is None (the
    # declared default); that case now reaches the final else branch.
    if placement and 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose no resourcePool property; traverse
            # up to the parent cluster's resource pool instead.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif placement and 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Bug fix: this message previously formatted placement['host'],
            # which raises KeyError in this branch (only 'resourcepool' is
            # guaranteed to be present here).
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif placement and 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit and returns a dict of
    the form ``{'size': <integer KB value>, 'unit': 'KB'}``.
    (The previous docstring incorrectly claimed a bare long integer was
    returned.)

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    Raises salt.exceptions.ArgumentValueError if the unit is not one of
    GB, MB or KB (case-insensitive).
    '''
    # Multipliers from each supported unit to kibibytes.
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    factor = multipliers.get(unit.lower())
    if factor is None:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    # vCenter needs an integer (long) value.
    return {'size': int(size * factor), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers a virtual machine on or off and waits for the operation to
    complete; returns the virtual machine object.

    virtual_machine
        vim.VirtualMachine object to power on/off

    action
        Either ``'on'`` or ``'off'``; any other value raises
        ArgumentValueError.
    '''
    # Resolve the requested operation up front so a single try/except block
    # covers both power directions.
    if action == 'on':
        power_op = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_op = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_op()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from the given config spec.

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Pin the VM to a specific host only when a valid host object was given.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine in the inventory from the given vmx file and
    returns the vim.VirtualMachine managed object reference on success.

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)
    '''
    # Build the call arguments once; the host is only passed when given.
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Reconfigures a virtual machine with the given config spec and returns
    the updated virtual machine reference.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to apply
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(reconfig_task, vm_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the given virtual machine and waits for the destroy task to
    complete.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory.
    (The previous docstring and log message said "Destroys", a copy-paste
    error from ``delete_vm``; UnregisterVM only removes the VM from the
    inventory.)

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Consistency fix: log the fault before translating it, like every
        # sibling function in this module does.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_folder
|
python
|
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
'''
Returns a Folder Object
service_instance
Service instance object
datacenter
Name of the datacenter
placement
Placement dictionary
base_vm_name
Existing virtual machine name (for cloning)
'''
log.trace('Retrieving folder information')
if base_vm_name:
vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
if 'parent' in vm_props:
folder_object = vm_props['parent']
else:
raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
'The virtual machine parent',
'object is not defined']))
elif 'folder' in placement:
folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
if len(folder_objects) > 1:
raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
'Multiple instances are available of the',
'specified folder {0}'.format(placement['folder'])]))
folder_object = folder_objects[0]
elif datacenter:
datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
if 'vmFolder' in dc_props:
folder_object = dc_props['vmFolder']
else:
raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
return folder_object
|
Returns a Folder Object
service_instance
Service instance object
datacenter
Name of the datacenter
placement
Placement dictionary
base_vm_name
Existing virtual machine name (for cloning)
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L3329-L3369
|
[
"def get_datacenter(service_instance, datacenter_name):\n '''\n Returns a vim.Datacenter managed object.\n\n service_instance\n The Service Instance Object from which to obtain datacenter.\n\n datacenter_name\n The datacenter name\n '''\n items = get_datacenters(service_instance,\n datacenter_names=[datacenter_name])\n if not items:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Datacenter \\'{0}\\' was not found'.format(datacenter_name))\n return items[0]\n",
"def get_properties_of_managed_object(mo_ref, properties):\n '''\n Returns specific properties of a managed object, retrieved in an\n optimally.\n\n mo_ref\n The managed object reference.\n\n properties\n List of properties of the managed object to retrieve.\n '''\n service_instance = get_service_instance_from_managed_object(mo_ref)\n log.trace('Retrieving name of %s', type(mo_ref).__name__)\n try:\n items = get_mors_with_properties(service_instance,\n type(mo_ref),\n container_ref=mo_ref,\n property_list=['name'],\n local_properties=True)\n mo_name = items[0]['name']\n except vmodl.query.InvalidProperty:\n mo_name = '<unnamed>'\n log.trace('Retrieving properties \\'%s\\' of %s \\'%s\\'',\n properties, type(mo_ref).__name__, mo_name)\n items = get_mors_with_properties(service_instance,\n type(mo_ref),\n container_ref=mo_ref,\n property_list=properties,\n local_properties=True)\n if not items:\n raise salt.exceptions.VMwareApiError(\n 'Properties of managed object \\'{0}\\' weren\\'t '\n 'retrieved'.format(mo_name))\n return items[0]\n",
"def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,\n traversal_spec=None, parent_ref=None):\n '''\n Get virtual machine properties based on the traversal specs and properties list,\n returns Virtual Machine object with properties.\n\n service_instance\n Service instance object to access vCenter\n\n name\n Name of the virtual machine.\n\n datacenter\n Datacenter name\n\n vm_properties\n List of vm properties.\n\n traversal_spec\n Traversal Spec object(s) for searching.\n\n parent_ref\n Container Reference object for searching under a given object.\n '''\n if datacenter and not parent_ref:\n parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)\n if not vm_properties:\n vm_properties = ['name',\n 'config.hardware.device',\n 'summary.storage.committed',\n 'summary.storage.uncommitted',\n 'summary.storage.unshared',\n 'layoutEx.file',\n 'config.guestFullName',\n 'config.guestId',\n 'guest.net',\n 'config.hardware.memoryMB',\n 'config.hardware.numCPU',\n 'config.files.vmPathName',\n 'summary.runtime.powerState',\n 'guest.toolsStatus']\n vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,\n vim.VirtualMachine,\n vm_properties,\n container_ref=parent_ref,\n traversal_spec=traversal_spec)\n vm_formatted = [vm for vm in vm_list if vm['name'] == name]\n if not vm_formatted:\n raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')\n elif len(vm_formatted) > 1:\n raise salt.exceptions.VMwareMultipleObjectsError(' '.join([\n 'Multiple virtual machines were found with the'\n 'same name, please specify a container.']))\n return vm_formatted[0]\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load this utility module when pyVmomi is importable.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param cmd: esxcli command and arguments
    :param protocol: Connection protocol; defaults to ``https``
    :param port: TCP port; defaults to 443
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: Dictionary returned by ``cmd.run_all`` (retcode, stdout, stderr),
             or ``False`` when the ``esxcli`` binary is not on the PATH
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    # NOTE(review): the password (and other values) are interpolated into a
    # single shell command string; values containing single quotes would
    # break the quoting. Consider a list-based invocation with shell=False.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                               user,
                                                               pwd,
                                                               protocol,
                                                               port,
                                                               cmd)
    else:
        # Connecting through vCenter; '-h' selects the managed ESXi host.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        Login username. Mandatory when ``mechanism`` is ``userpass``.

    password
        Login password. Mandatory when ``mechanism`` is ``userpass``.

    protocol
        Connection protocol (usually ``https``).

    port
        Connection TCP port (usually 443).

    mechanism
        Login mechanism: ``userpass`` or ``sspi``.

    principal
        Kerberos service principal. Mandatory when ``mechanism`` is ``sspi``.

    domain
        Kerberos user domain. Mandatory when ``mechanism`` is ``sspi``.

    Raises CommandExecutionError on invalid/missing parameters and
    VMwareConnectionError when the connection cannot be established.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # BUGFIX: Python 3 exceptions have no 'message' attribute, so
        # reading exc.message directly would raise AttributeError inside
        # this handler. Fall back to str(exc) when 'message' is absent.
        message = getattr(exc, 'message', six.text_type(exc))
        if 'unexpected keyword argument' in message:
            log.error('Initial connect to the VMware endpoint failed with %s', message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # Retry with certificate verification disabled when the failure
            # was a certificate verification error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            # Last resort: retry over TLSv1 without verification.
            if 'certificate verify failed' in six.text_type(exc):
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is cleanly closed on interpreter exit.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Look up a VMware guest customization spec by name, for use when
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the first matching object, or None when no object matches.
    '''
    content = get_inventory(si)
    container = content.viewManager.CreateContainerView(
        content.rootFolder, [obj_type], True)
    return next((obj for obj in container.view if obj.name == obj_name), None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``

    Reuses pyVim's cached session when it targets the same host; otherwise
    (or when the session has gone stale) a fresh connection is established.
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # GetSi() returns the process-wide cached service instance, if any.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
                (hasattr(stub, 'host') and
                 stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Session expired on the server side: reconnect once.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.

    The new stub reuses the authenticated session cookie of the existing
    connection, so no re-login is required.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    # The cookie header value is quoted; extract the raw session id.
    session_cookie = stub.cookie.split('"')[1]
    # Propagate the vCenter session to requests made through the new stub.
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only in log messages. This field is
        optional; when a falsy value (e.g. None) is passed explicitly, the
        managed object's own name attribute is used instead.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a new ServiceInstance that shares the SOAP stub (and therefore
    # the authenticated connection) of the managed object.
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises VMwareApiError on permission or generic vim faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        # Wrap generic vim faults in a salt-specific exception type.
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises VMwareApiError/VMwareRuntimeError on API failures, and
    VMwareApiError for any apiType other than VirtualCenter/HostAgent.
    '''
    try:
        # 'VirtualCenter' identifies vCenter; 'HostAgent' identifies ESXi.
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'VirtualCenter':
        return True
    elif api_type == 'HostAgent':
        return False
    else:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Returns the ``AboutInfo`` object of the endpoint; raises
    VMwareApiError/VMwareRuntimeError on API failures.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no DVS by that name exists
    '''
    # Cheap pre-check against the list of known DVS names before walking
    # the inventory.
    if dvs_name not in list_dvs(service_instance):
        return None
    content = get_inventory(service_instance)
    view = content.viewManager.CreateContainerView(
        content.rootFolder, [vim.DistributedVirtualSwitch], True)
    return next((dvs for dvs in view.view if dvs.name == dvs_name), None)
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None when no portgroup by that name exists

    NOTE(review): this body is currently identical to ``_get_dvs_portgroup``;
    it performs a plain name lookup and does NOT verify that the matched
    portgroup is actually an uplink portgroup. Confirm whether callers rely
    on uplink-only matching.
    '''
    for portgroup in dvs.portgroup:
        if portgroup.name == portgroup_name:
            return portgroup
    return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    Returns the base64-encoded token produced by the first successful
    GSSAPI step; raises ImportError when gssapi is unavailable and
    CommandExecutionError when no token could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            # Return the first token produced without completing the full
            # context negotiation.
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): in_token is never reassigned inside the loop, so this
        # branch fires on the first step that yields no token.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    # Loop exited with an established context but no token was ever produced.
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    Returns an empty dict when the endpoint is not an ESXi host (HostAgent)
    or when no HostSystem is visible.

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only direct ESXi connections ('HostAgent') expose exactly one host.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; convert to MiB for the grain.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Collect per-vmkernel-NIC addressing information.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NICs contribute only MAC addresses.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the reference to the container view.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises VMwareApiError/VMwareRuntimeError on API failures.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.

    Returns the list of ObjectContent results from the PropertyCollector;
    raises VMwareApiError/VMwareRuntimeError on API failures.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view created above (only when we created it here)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Returns the first managed object reference having the specified property value.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value to match, either against the selected property or against
        the stringified managed object id.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # The stringified MOR looks like 'vim.X:id'; strip surrounding quotes.
        mor_id = six.text_type(candidate.get('object', '')).strip('\'"')
        # Match either on the requested property value or on the MOR id.
        if property_value in (candidate[property_name], mor_id):
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    # Retry the retrieval once on transient connection failures
    # (BadStatusLine, or an IOError that is a broken pipe).
    try:
        content = get_content(service_instance, object_type, **content_kwargs)
    except BadStatusLine:
        content = get_content(service_instance, object_type, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(service_instance, object_type, **content_kwargs)
    object_list = []
    for obj in content:
        entry = {prop.name: prop.val for prop in obj.propSet}
        entry['object'] = obj.obj
        object_list.append(entry)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way (a single local-property collection).

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises VMwareApiError when the properties could not be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    # First resolve the object's name so error messages can identify it.
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        # Some managed object types have no 'name' property.
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.
    If the name wasn't found, it returns None.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device of the requested type.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or 'e1000e'.

    Raises ValueError for any other value.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type not in adapter_classes:
        raise ValueError('An unknown network adapter object type name.')
    return adapter_classes[adapter_type]()
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type string for a virtual adapter device.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ValueError when the device matches no known adapter class.
    '''
    # Order matters: vmxnet2/vmxnet3 are matched before the plain vmxnet
    # check so the more specific device type wins.
    ordered_types = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for adapter_cls, type_name in ordered_types:
        if isinstance(adapter_object, adapter_cls):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.

    When neither filter applies (no names given and get_all_dvss is False),
    an empty list is returned.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    entries = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualSwitch,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    dvss = []
    for entry in entries:
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter

    dc_ref
        Reference to the datacenter (vim.Datacenter).

    Raises VMwareObjectRetrievalError when the folder cannot be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    folder_traversal = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=folder_traversal)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the
        name is built.

    NOTE(review): this function does not return the new DVS reference;
    callers must look it up afterwards (e.g. via ``get_dvss``).
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the creation task finished.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.

    Blocks until the reconfiguration task completes; raises
    VMwareApiError/VMwareRuntimeError on API failures.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the reconfigure task finished.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (Network I/O Control) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.

    Raises VMwareApiError/VMwareRuntimeError on API failures.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises salt.exceptions.ArgumentValueError if the parent is neither a
    datacenter nor a distributed virtual switch.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    if isinstance(parent_ref, vim.Datacenter):
        # Portgroups live under the datacenter's network folder; skip the
        # folder itself and traverse into its child entities
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        # A DVS exposes its portgroups directly via the 'portgroup' property
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Filter on the retrieved 'name' property unless all portgroups were
    # requested
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference

    Raises a VMwareObjectRetrievalError when no uplink portgroup exists.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup carries a vCenter-assigned system tag
    matches = []
    for pg in get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualPortgroup,
                                       container_ref=dvs_ref,
                                       property_list=['tag'],
                                       traversal_spec=traversal_spec):
        tags = pg['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            matches.append(pg['object'])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return matches[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log message typo: 'portgrouo' -> 'portgroup'
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Deletes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.

    Raises salt.exceptions.ArgumentValueError if the parent is not a
    datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    properties = ['name']
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks live under the datacenter's network folder; skip the folder
    # itself and traverse into its child entities
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    # Filter on the retrieved 'name' property unless all networks were
    # requested
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Network,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_networks or
             (network_names and i['name'] in network_names)]
    return items
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information
        (e.g. vim.Datastore).

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    # Fixed docstring (parameter is 'vim_object', not 'object_type') and
    # replaced the manual append loop with a comprehension
    if properties is None:
        properties = ['name']
    return [item['name'] for item in
            get_mors_with_properties(service_instance, vim_object, properties)]
def get_license_manager(service_instance):
    '''
    Retrieves the license manager of a vCenter/ESXi instance.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Retrieves the license assignment manager of a vCenter/ESXi instance.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises a VMwareObjectRetrievalError if no assignment manager is
    available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    if not assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Retrieves the licenses installed on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license to the instance and returns the new license object.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as a client-visible label
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        added_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return added_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ArgumentValueError if entity_name is missing, and
    VMwareObjectRetrievalError if the assignment lookup is inconsistent.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        # The vCenter itself is identified by its instance UUID
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        # Other entities (hosts, clusters) are identified by their moid
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter UUID lookup is expected to yield exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        # NOTE(review): assignments[0] raises IndexError if the query
        # returned no assignments — presumably vCenter always returns at
        # least one for its own UUID; confirm
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter; identified by its instance UUID
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before raising, consistent with the other API wrappers
            # in this module (was missing here)
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Lists the names of all datacenters on the given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns datacenter managed objects from a vCenter, optionally filtered
    by name.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    matches = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            matches.append(entry['object'])
    return matches
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object matching the given name.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name

    Raises a VMwareObjectRetrievalError when no such datacenter exists.
    '''
    found = get_datacenters(service_instance,
                            datacenter_names=[datacenter_name])
    if not found:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return found[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder and returns its managed
    object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved

    Raises a VMwareObjectRetrievalError when no cluster with the given
    name exists in the datacenter.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's host folder; skip the folder
    # itself and traverse into its child entities
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    datacenter_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, datacenter_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Reconfigures an existing cluster and waits for the task to complete.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Lists the names of all clusters on the given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Lists the names of all datastore clusters (storage pods) on the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Lists the names of all datastores on the given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a mapping of datastore name to its basic information
    (name, type, url, capacity, free, used, usage, hosts).

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return dict(
        (ds_name, list_datastore_full(service_instance, ds_name))
        for ds_name in list_objects(service_instance, vim.Datastore))
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises a VMwareObjectRetrievalError when the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )

    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Summary sizes are reported in bytes; convert to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against a zero-capacity datastore (e.g. inaccessible or
    # unmounted) which would otherwise raise ZeroDivisionError
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0
    items['hosts'] = []

    for host in datastore_object.host:
        # host.key stringifies to 'vim.HostSystem:<moid>'; keep the moid part
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)

    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Gets a reference to the first object of the specified type whose name
    matches, or None when there is no match.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((obj for obj in container.view if obj.name == obj_name), None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Gets a reference to the first object of the specified type whose managed
    object id matches, or None when there is no match.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    return next((obj for obj in container.view if obj._moId == obj_moid), None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # The search is asynchronous; one task is started per datastore
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Datastores that don't contain the directory are skipped
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.
        NOTE(review): when backing_disk_ids is also given, this list is
        extended in place, i.e. the caller's list is mutated.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # NOTE(review): mutates the caller's datastore_names list
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    current_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'",
              current_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object from which to obtain the storage system.

    host_ref
        Reference to the ESXi host.

    hostname
        Host name used for logging; retrieved from host_ref if not provided.

    Raises a VMwareObjectRetrievalError when the storage system could not
    be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)

    # The storage system hangs off the host's configManager
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo.
    '''
    try:
        infos = storage_system.RetrieveDiskPartitionInfo(
            devicePath=[device_path])
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('partition_info = %s', infos[0])
    return infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed: was '{0}' which the logging module's %-formatting never
    # interpolates, so the value was dropped from the log record
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore on a scsi disk and returns the created
    vim.Datastore reference.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDisk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved from the host if not
        provided. Default is None.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Carve the new partition out of the disk's free space; it will hold the
    # single VMFS extent of the new datastore
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    # Translate pyVmomi faults into salt exceptions
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Retrieves the datastore system of an ESXi host.

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem
    spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    found = get_mors_with_properties(si,
                                     vim.HostDatastoreSystem,
                                     property_list=['datastore'],
                                     container_ref=host_ref,
                                     traversal_spec=spec)
    if not found:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return found[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The removal is performed through the datastore
    system of the first host the datastore is attached to.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    # A datastore can only be removed through a host it is mounted on
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            # Keep only hosts whose parent is the requested cluster
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.

        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.ScsiLun objects on an ESXi host; an empty list
    is returned when the host reports no luns.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Builds a map keyed by scsi address of all vim.ScsiLun objects on an ESXi
    host.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref,
                                                                name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # scsi address -> lun key
    addr_to_key = _get_scsi_address_to_lun_key_map(service_instance, host_ref,
                                                   storage_system, hostname)
    # lun key -> lun object
    key_to_lun = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        key_to_lun[lun.key] = lun
    # Join the two maps on the lun key
    return {addr: key_to_lun[key]
            for addr, key in six.iteritems(addr_to_key)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Nothing to filter on -> nothing to return
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    # Keep only HostScsiDisk luns matching either filter (or all of them)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the HostScsiDisk with the requested canonical name
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # Retrieve the host's scsi luns through the storage system
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # No cache disks requested -> nothing to return
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by the canonical name of its (single) ssd
    # cache disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that a disk group is composed of the expected cache disk and
    capacity disks; raises ArgumentValueError when the composition differs.
    '''
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    # Compare the capacity disk sets order-independently
    actual_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_ids = sorted(capacity_disk_ids)
    if actual_ids != expected_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_ids, expected_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the first cache configuration info entry if the host cache is
    configured on the specified host, otherwise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Retrieve the cache manager's config info via a traversal from the
        # host object
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        # Cache manager was provided; query its property directly
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host. Returns True on success.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the configuration task completes (raises on task error)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Lists every ESXi host known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    host_list = list_objects(service_instance, vim.HostSystem)
    return host_list
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean specifying whether to return all resource pools in the
        container instead of filtering by name

    return
        List of vim.ResourcePool managed object references

    raises
        VMwareObjectRetrievalError if no matching resource pool is found
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search under the datacenter when one is given, otherwise under the
    # whole inventory root
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = [pool['object'] for pool in resource_pools
                      if get_all_resource_pools or
                      (pool['name'] in resource_pool_names)]
    if not selected_pools:
        # Report the names that were requested (previously this formatted the
        # empty result list, making the error message useless for debugging)
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))

    return selected_pools
def list_resourcepools(service_instance):
    '''
    Lists every resource pool known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    pool_list = list_objects(service_instance, vim.ResourcePool)
    return pool_list
def list_networks(service_instance):
    '''
    Lists every network known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    network_list = list_objects(service_instance, vim.Network)
    return network_list
def list_vms(service_instance):
    '''
    Lists every virtual machine known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vm_list = list_objects(service_instance, vim.VirtualMachine)
    return vm_list
def list_folders(service_instance):
    '''
    Lists every folder known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    folder_list = list_objects(service_instance, vim.Folder)
    return folder_list
def list_dvs(service_instance):
    '''
    Lists every distributed virtual switch known to the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        switches.
    '''
    dvs_list = list_objects(service_instance, vim.DistributedVirtualSwitch)
    return dvs_list
def list_vapps(service_instance):
    '''
    Lists every vApp known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vapp_list = list_objects(service_instance, vim.VirtualApp)
    return vapp_list
def list_portgroups(service_instance):
    '''
    Lists every distributed virtual portgroup known to the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        portgroups.
    '''
    portgroup_list = list_objects(service_instance,
                                  vim.dvs.DistributedVirtualPortgroup)
    return portgroup_list
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed. Returns the task result on success;
    re-raises the task's error (translated to a salt exception) on failure.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll the task until it leaves the running/queued states
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only log every sleep_seconds iterations to avoid log spam
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep up to the next 1-second wall-clock boundary
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; raise the stored fault so it can be
        # translated to the matching salt exception below
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    raises
        VMwareObjectRetrievalError when no VM matches the name;
        VMwareMultipleObjectsError when more than one VM matches.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set covering identity, storage, hardware,
        # network and runtime information
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # NOTE: previously the implicit string concatenation here was missing
        # a space, producing the message "...with thesame name..."
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the '
            'same name, please specify a container.')
    return vm_formatted[0]
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object to access vCenter

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                    get_properties_of_managed_object(host_objects[0],
                                                     properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts don't expose 'resourcePool' directly; traverse
            # through the parent compute resource instead
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # BUGFIX: this branch previously referenced placement['host'],
            # which does not exist here and raised a KeyError instead of the
            # intended VMwareMultipleObjectsError
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit.

    unit
        Unit of the size: one of ``gb``, ``mb`` or ``kb`` (case
        insensitive). Note: to VMware a GB is the same as GiB = 1024MiB.

    size
        Number which represents the size.

    return
        Dictionary with the size converted to an integer number of
        kilobytes, e.g. ``{'size': 1048576, 'unit': 'KB'}``.

    raise
        salt.exceptions.ArgumentValueError if the unit is not supported.
    '''
    # vCenter needs a (long) integer value, expressed in kilobytes
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    multiplier = multipliers.get(unit.lower())
    if multiplier is None:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': int(size * multiplier), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine.

    virtual_machine
        vim.VirtualMachine object of the virtual machine to power on/off.

    action
        Operation to perform: ``on`` (default) or ``off``.

    return
        The same vim.VirtualMachine reference, after the power task has
        completed.

    raise
        salt.exceptions.ArgumentValueError for an unsupported action;
        salt.exceptions.VMwareApiError / VMwareRuntimeError on API faults;
        salt.exceptions.VMwarePowerOnError if a required file was missing.
    '''
    # Start the appropriate power task; task_name is only used for logging
    # and error reporting by wait_for_task below.
    if action == 'on':
        try:
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    elif action == 'off':
        try:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    # Block until the power task finishes; a missing VM file surfaces as
    # VMwareFileNotFoundError and is re-raised as a power-operation error.
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from a config spec.

    vm_name
        Name of the virtual machine to be created.

    vm_config_spec
        Virtual Machine Config Spec object.

    folder_object
        vm Folder managed object reference under which the VM is created.

    resourcepool_object
        Resource pool object where the machine will be created.

    host_object
        Host object where the machine will be placed (optional).

    return
        Virtual Machine managed object reference.
    '''
    # Only pin the VM to a specific host when a valid host reference
    # was supplied; otherwise let the resource pool decide placement.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file; on
    success it returns the vim.VirtualMachine managed object reference.

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object.

    name
        Name of the virtual machine.

    vmx_path
        Full path to the vmx file; the datastore name should be included.

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object.

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional).
    '''
    # Register against the datacenter's vmFolder; the host argument is only
    # passed when an explicit placement host was supplied.
    try:
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A missing vmx file surfaces as VMwareFileNotFoundError while waiting
    # for the task, and is translated into a registration error.
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given config spec.

    vm_ref
        Virtual machine managed object reference.

    vm_config_spec
        Virtual machine config spec object carrying the changes to apply.

    return
        The result of waiting on the ReconfigVM task.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfiguration completes before returning
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine, removing it from the inventory and
    deleting its files from the datastore.

    vm_ref
        Managed object reference of a virtual machine object.
    '''
    name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Wait for the destroy task so callers observe completion (or failure)
    wait_for_task(destroy_task, name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory. Unlike
    ``delete_vm``, the VM's files are left on the datastore.

    vm_ref
        Managed object reference of a virtual machine object.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, consistent with the sibling VM functions
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
get_placement
|
python
|
def get_placement(service_instance, datacenter, placement=None):
'''
To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.
datacenter
Name of the datacenter
placement
Dictionary with the placement info, cluster, host resource pool name
return
Resource pool, cluster and host object if any applies
'''
log.trace('Retrieving placement information')
resourcepool_object, placement_object = None, None
if 'host' in placement:
host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
if not host_objects:
raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
'The specified host',
'{0} cannot be found.'.format(placement['host'])]))
try:
host_props = \
get_properties_of_managed_object(host_objects[0],
properties=['resourcePool'])
resourcepool_object = host_props['resourcePool']
except vmodl.query.InvalidProperty:
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
path='parent',
skip=True,
type=vim.HostSystem,
selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
path='resourcePool',
skip=False,
type=vim.ClusterComputeResource)])
resourcepools = get_mors_with_properties(service_instance,
vim.ResourcePool,
container_ref=host_objects[0],
property_list=['name'],
traversal_spec=traversal_spec)
if resourcepools:
resourcepool_object = resourcepools[0]['object']
else:
raise salt.exceptions.VMwareObjectRetrievalError(
'The resource pool of host {0} cannot be found.'.format(placement['host']))
placement_object = host_objects[0]
elif 'resourcepool' in placement:
resourcepool_objects = get_resource_pools(service_instance,
[placement['resourcepool']],
datacenter_name=datacenter)
if len(resourcepool_objects) > 1:
raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
'Multiple instances are available of the',
'specified host {}.'.format(placement['host'])]))
resourcepool_object = resourcepool_objects[0]
res_props = get_properties_of_managed_object(resourcepool_object,
properties=['parent'])
if 'parent' in res_props:
placement_object = res_props['parent']
else:
raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
'The resource pool\'s parent',
'object is not defined']))
elif 'cluster' in placement:
datacenter_object = get_datacenter(service_instance, datacenter)
cluster_object = get_cluster(datacenter_object, placement['cluster'])
clus_props = get_properties_of_managed_object(cluster_object,
properties=['resourcePool'])
if 'resourcePool' in clus_props:
resourcepool_object = clus_props['resourcePool']
else:
raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
'The cluster\'s resource pool',
'object is not defined']))
placement_object = cluster_object
else:
# We are checking the schema for this object, this exception should never be raised
raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
'Placement is not defined.']))
return (resourcepool_object, placement_object)
|
To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.
datacenter
Name of the datacenter
placement
Dictionary with the placement info, cluster, host resource pool name
return
Resource pool, cluster and host object if any applies
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L3372-L3451
|
[
"def get_hosts(service_instance, datacenter_name=None, host_names=None,\n cluster_name=None, get_all_hosts=False):\n '''\n Returns a list of vim.HostSystem objects representing ESXi hosts\n in a vcenter filtered by their names and/or datacenter, cluster membership.\n\n service_instance\n The Service Instance Object from which to obtain the hosts.\n\n datacenter_name\n The datacenter name. Default is None.\n\n host_names\n The host_names to be retrieved. Default is None.\n\n cluster_name\n The cluster name - used to restrict the hosts retrieved. Only used if\n the datacenter is set. This argument is optional.\n\n get_all_hosts\n Specifies whether to retrieve all hosts in the container.\n Default value is False.\n '''\n properties = ['name']\n if cluster_name and not datacenter_name:\n raise salt.exceptions.ArgumentValueError(\n 'Must specify the datacenter when specifying the cluster')\n if not host_names:\n host_names = []\n if not datacenter_name:\n # Assume the root folder is the starting point\n start_point = get_root_folder(service_instance)\n else:\n start_point = get_datacenter(service_instance, datacenter_name)\n if cluster_name:\n # Retrieval to test if cluster exists. 
Cluster existence only makes\n # sense if the datacenter has been specified\n properties.append('parent')\n\n # Search for the objects\n hosts = get_mors_with_properties(service_instance,\n vim.HostSystem,\n container_ref=start_point,\n property_list=properties)\n log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])\n filtered_hosts = []\n for h in hosts:\n # Complex conditions checking if a host should be added to the\n # filtered list (either due to its name and/or cluster membership)\n\n if cluster_name:\n if not isinstance(h['parent'], vim.ClusterComputeResource):\n continue\n parent_name = get_managed_object_name(h['parent'])\n if parent_name != cluster_name:\n continue\n\n if get_all_hosts:\n filtered_hosts.append(h['object'])\n continue\n\n if h['name'] in host_names:\n filtered_hosts.append(h['object'])\n return filtered_hosts\n",
"def get_datacenter(service_instance, datacenter_name):\n '''\n Returns a vim.Datacenter managed object.\n\n service_instance\n The Service Instance Object from which to obtain datacenter.\n\n datacenter_name\n The datacenter name\n '''\n items = get_datacenters(service_instance,\n datacenter_names=[datacenter_name])\n if not items:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Datacenter \\'{0}\\' was not found'.format(datacenter_name))\n return items[0]\n",
"def get_cluster(dc_ref, cluster):\n '''\n Returns a cluster in a datacenter.\n\n dc_ref\n The datacenter reference\n\n cluster\n The cluster to be retrieved\n '''\n dc_name = get_managed_object_name(dc_ref)\n log.trace('Retrieving cluster \\'%s\\' from datacenter \\'%s\\'',\n cluster, dc_name)\n si = get_service_instance_from_managed_object(dc_ref, name=dc_name)\n traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(\n path='hostFolder',\n skip=True,\n type=vim.Datacenter,\n selectSet=[vmodl.query.PropertyCollector.TraversalSpec(\n path='childEntity',\n skip=False,\n type=vim.Folder)])\n items = [i['object'] for i in\n get_mors_with_properties(si,\n vim.ClusterComputeResource,\n container_ref=dc_ref,\n property_list=['name'],\n traversal_spec=traversal_spec)\n if i['name'] == cluster]\n if not items:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'Cluster \\'{0}\\' was not found in datacenter '\n '\\'{1}\\''. format(cluster, dc_name))\n return items[0]\n",
"def get_mors_with_properties(service_instance, object_type, property_list=None,\n container_ref=None, traversal_spec=None,\n local_properties=False):\n '''\n Returns a list containing properties and managed object references for the managed object.\n\n service_instance\n The Service Instance from which to obtain managed object references.\n\n object_type\n The type of content for which to obtain managed object references.\n\n property_list\n An optional list of object properties used to return even more filtered managed object reference results.\n\n container_ref\n An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,\n ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory\n rootFolder.\n\n traversal_spec\n An optional TraversalSpec to be used instead of the standard\n ``Traverse All`` spec\n\n local_properties\n Flag specigying whether the properties to be retrieved are local to the\n container. If that is the case, the traversal spec needs to be None.\n '''\n # Get all the content\n content_args = [service_instance, object_type]\n content_kwargs = {'property_list': property_list,\n 'container_ref': container_ref,\n 'traversal_spec': traversal_spec,\n 'local_properties': local_properties}\n try:\n content = get_content(*content_args, **content_kwargs)\n except BadStatusLine:\n content = get_content(*content_args, **content_kwargs)\n except IOError as exc:\n if exc.errno != errno.EPIPE:\n raise exc\n content = get_content(*content_args, **content_kwargs)\n\n object_list = []\n for obj in content:\n properties = {}\n for prop in obj.propSet:\n properties[prop.name] = prop.val\n properties['object'] = obj.obj\n object_list.append(properties)\n log.trace('Retrieved %s objects', len(object_list))\n return object_list\n",
"def get_properties_of_managed_object(mo_ref, properties):\n '''\n Returns specific properties of a managed object, retrieved in an\n optimally.\n\n mo_ref\n The managed object reference.\n\n properties\n List of properties of the managed object to retrieve.\n '''\n service_instance = get_service_instance_from_managed_object(mo_ref)\n log.trace('Retrieving name of %s', type(mo_ref).__name__)\n try:\n items = get_mors_with_properties(service_instance,\n type(mo_ref),\n container_ref=mo_ref,\n property_list=['name'],\n local_properties=True)\n mo_name = items[0]['name']\n except vmodl.query.InvalidProperty:\n mo_name = '<unnamed>'\n log.trace('Retrieving properties \\'%s\\' of %s \\'%s\\'',\n properties, type(mo_ref).__name__, mo_name)\n items = get_mors_with_properties(service_instance,\n type(mo_ref),\n container_ref=mo_ref,\n property_list=properties,\n local_properties=True)\n if not items:\n raise salt.exceptions.VMwareApiError(\n 'Properties of managed object \\'{0}\\' weren\\'t '\n 'retrieved'.format(mo_name))\n return items[0]\n",
"def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,\n get_all_resource_pools=False):\n '''\n Retrieves resource pool objects\n\n service_instance\n The service instance object to query the vCenter\n\n resource_pool_names\n Resource pool names\n\n datacenter_name\n Name of the datacenter where the resource pool is available\n\n get_all_resource_pools\n Boolean\n\n return\n Resourcepool managed object reference\n '''\n\n properties = ['name']\n if not resource_pool_names:\n resource_pool_names = []\n if datacenter_name:\n container_ref = get_datacenter(service_instance, datacenter_name)\n else:\n container_ref = get_root_folder(service_instance)\n\n resource_pools = get_mors_with_properties(service_instance,\n vim.ResourcePool,\n container_ref=container_ref,\n property_list=properties)\n\n selected_pools = []\n for pool in resource_pools:\n if get_all_resource_pools or (pool['name'] in resource_pool_names):\n selected_pools.append(pool['object'])\n if not selected_pools:\n raise salt.exceptions.VMwareObjectRetrievalError(\n 'The resource pools with properties '\n 'names={} get_all={} could not be found'.format(selected_pools,\n get_all_resource_pools))\n\n return selected_pools\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli commmand, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    # NOTE(review): the password and credstore path are interpolated into a
    # shell string wrapped only in single quotes; a value containing a
    # single quote would break quoting — confirm inputs are trusted.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting through vCenter; -h selects the target ESXi host
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    # output_loglevel='quiet' keeps the password out of the debug logs
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username / password
        Credentials; both are mandatory for the ``userpass`` mechanism.

    protocol / port
        Connection protocol and TCP port.

    mechanism
        Either ``userpass`` or ``sspi``.

    principal / domain
        Kerberos service principal and user domain; both are mandatory for
        the ``sspi`` mechanism.
    '''
    log.trace('Retrieving new service instance')
    token = None
    # Validate the parameters required by the chosen login mechanism and,
    # for sspi, obtain a GSSAPI token up front.
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    # First connection attempt: use pyVmomi's default SSL handling.
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Older pyVmomi versions do not accept some of the keyword args above
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # Second attempt: if the failure was a certificate verification
            # error, retry with an unverified SSL context.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            # Third attempt: retry with an explicit TLSv1 context that skips
            # certificate verification.
            if 'certificate verify failed' in six.text_type(exc):
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is torn down when the interpreter exits
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    # Look up the spec by name through the customization spec manager
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Return the first object whose name matches, or None when absent
    return next(
        (item for item in container.view if item.name == obj_name), None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # pyVim caches the last connection; reuse it when it still points at
    # the same host and we are not running inside a proxy minion.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Session expired server-side; drop it and build a fresh one
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    ssl_context = None
    if sys.version_info[:3] > (2, 7, 8):
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE
    old_stub = service_instance._stub
    hostname = old_stub.host.split(':')[0]
    # Propagate the existing session cookie so the new stub shares the login
    session_cookie = old_stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=ssl_context)
    new_stub.cookie = old_stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only for logging. This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a ServiceInstance that reuses the managed object's SOAP stub,
    # so it talks to the same endpoint with the same session.
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    # Translate pyVmomi faults into salt exceptions, consistent with the
    # rest of this module.
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Return True when the connection is made to a vCenter Server, and False
    when it is made to an ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' identifies a vCenter; 'HostAgent' a standalone ESXi.
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Return the ``about`` information of the vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no switch matches
    '''
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    # Return the first DVS in the container view with a matching name.
    return next((dvs for dvs in container.view if dvs.name == dvs_name), None)
def _get_pnics(host_reference):
    '''
    Helper that returns the physical NICs (and their information)
    configured on the given host.
    '''
    network_config = host_reference.config.network
    return network_config.pnic
def _get_vnics(host_reference):
    '''
    Helper that returns the virtual NICs (and their information)
    configured on the given host.
    '''
    network_config = host_reference.config.network
    return network_config.vnic
def _get_vnic_manager(host_reference):
    '''
    Helper that returns the host's virtual NIC manager
    (``configManager.virtualNicManager``).
    '''
    config_manager = host_reference.configManager
    return config_manager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
    '''
    Return the portgroup object with the given name on the dvs.

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None when no portgroup matches
    '''
    matches = (pg for pg in dvs.portgroup if pg.name == portgroup_name)
    return next(matches, None)
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None when no portgroup matches
    '''
    # NOTE(review): this body is identical to _get_dvs_portgroup -- it
    # matches any portgroup by name and does not restrict the search to
    # uplink portgroups; callers are expected to pass an uplink portgroup
    # name. Confirm whether filtering on uplink portgroups was intended.
    for portgroup in dvs.portgroup:
        if portgroup.name == portgroup_name:
            return portgroup
    return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for a Kerberos connection.

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    Raises ImportError when the gssapi library is unavailable, and
    CommandExecutionError when no token could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    # Service principal name in the form principal/host@DOMAIN.
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # Advance the GSSAPI handshake; there is no input token on the
        # first iteration.
        out_token = ctx.step(in_token)
        if out_token:
            # Return the first produced token, base64-encoded. On py3 the
            # token must be converted to bytes before encoding.
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): in_token is never reassigned inside this loop, so
        # this branch raises on any step that yields no output token.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance
    is a HostAgent type.

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Hardware grains are only gathered for a direct ESXi (HostAgent)
    # connection, not for a vCenter connection.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # All grains are read from the first (only) host in the view.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; convert to MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Collect per-vnic IP/MAC information keyed by device name.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host + '.' + domain; the dot is omitted when no
            # domain is configured.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NIC MACs are merged into the same hwaddr mapping.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the reference to the container view.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (retrieved service content) of a Service Instance
    Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Return the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    try:
        log.trace('Retrieving root folder')
        content = service_instance.RetrieveContent()
        return content.rootFolder
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    # (all properties when no explicit property_list was given)
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content; the starting object itself is
    # skipped unless local properties were requested
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view (only if this function created it above)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose property matches the
    given value (a match on the stringified reference id is also accepted).

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property for which to obtain the managed object
        reference.

    property_name
        An object property used to return the specified object reference
        results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, the inventory
        rootFolder is searched.
    '''
    # Retrieve every managed object of the requested type together with the
    # property to match on.
    candidates = get_mors_with_properties(service_instance, object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # e.g. "'vim.VirtualMachine:vm-42'" -> "vim.VirtualMachine:vm-42"
        ref_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if property_value in (candidate[property_name], ref_id):
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    Each list entry is a dict of the requested properties plus an
    ``object`` key holding the managed object reference itself.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specigying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # Retry once on a dropped HTTP connection.
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        # Retry once on a broken pipe; re-raise any other IOError.
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)
    # Flatten each returned ObjectContent into a plain dict of
    # property name -> value, keeping the reference under 'object'.
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Return specific properties of a managed object, retrieved optimally
    through the property collector. Raises a VMwareApiError when the
    properties could not be retrieved.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First fetch the object's name; it is used only in logging and in the
    # error message below.
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    prop_entries = get_mors_with_properties(service_instance,
                                            type(mo_ref),
                                            container_ref=mo_ref,
                                            property_list=properties,
                                            local_properties=True)
    if not prop_entries:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return prop_entries[0]
def get_managed_object_name(mo_ref):
    '''
    Return the name of a managed object, or None when the name wasn't
    found.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device of the requested type.

    adapter_type
        The adapter type name; one of ``vmxnet``, ``vmxnet2``, ``vmxnet3``,
        ``e1000`` or ``e1000e``. (Fixed docstring typo: was ``adpater_type``.)

    Raises ``ValueError`` for an unknown adapter type name.
    '''
    # Dispatch table replaces the original if/elif chain; behavior is
    # unchanged (same classes, same instantiation, same error).
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type not in adapter_classes:
        raise ValueError('An unknown network adapter object type name.')
    return adapter_classes[adapter_type]()
def get_network_adapter_object_type(adapter_object):
    '''
    Return the network adapter type name for a virtual adapter device.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ``ValueError`` for an unknown adapter object type.
    '''
    # Checked in the original order: the more specific device classes are
    # tested before the plain vmxnet/e1000 ones.
    type_map = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for device_class, type_name in type_map:
        if isinstance(adapter_object, device_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Return the distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    datacenter = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        datacenter,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs.
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity', skip=False, type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder', skip=True, type=vim.Datacenter,
        selectSet=[child_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Return the network folder of a datacenter.

    Raises a VMwareObjectRetrievalError when the folder couldn't be
    retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # The network folder hangs directly off the datacenter.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder', skip=False, type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Create a distributed virtual switch (DVS) in a datacenter and wait for
    the creation task to complete.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the
        name is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Reconfigure a distributed virtual switch with the given config spec and
    wait for the task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enable or disable network I/O control (NIOC) on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on dvs \'%s\'',
              enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Return distributed virtual portgroups (dvportgroups).

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__, parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Traverse datacenter -> networkFolder -> childEntity.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder', skip=True, type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity', skip=False, type=vim.Folder)])
    else:
        # Parent is a distributed virtual switch; portgroups hang off it.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup', skip=False, type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or (portgroup_names and
                                  entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Return the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference

    Raises a VMwareObjectRetrievalError when no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup', skip=False, type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        # Uplink portgroups carry the SYSTEM/DVS.UPLINKPG tag.
        if entry['tag'] and any(t.key == 'SYSTEM/DVS.UPLINKPG'
                                for t in entry['tag']):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Create a distributed virtual portgroup on a distributed virtual switch
    (dvs) and wait for the task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Reconfigure a distributed virtual portgroup and wait for the task to
    complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in the log message ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Destroy a distributed virtual portgroup and wait for the task to
    complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Return the standard-switch networks in a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicating whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__, parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to find networks.
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity', skip=False, type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder', skip=True, type=vim.Datacenter,
        selectSet=[child_spec])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or (network_names and
                                entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Return a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    props = ['name'] if properties is None else properties
    return [entry['name'] for entry in
            get_mors_with_properties(service_instance, vim_object, props)]
def get_license_manager(service_instance):
    '''
    Return the license manager of a Service Instance.

    service_instance
        The Service Instance Object from which to obtain the license
        manager.
    '''
    log.debug('Retrieving license manager')
    try:
        return service_instance.content.licenseManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_license_assignment_manager(service_instance):
    '''
    Return the license assignment manager of a Service Instance. Raises a
    VMwareObjectRetrievalError when the manager couldn't be retrieved.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Return the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Add a license and return it.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label shown by the vSphere client.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: {0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required (an ArgumentValueError is
        raised when it is missing).
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        # The vCenter is identified by its instance UUID rather than a moid
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter (uuid lookup) must have exactly one license assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        # Sanity check that the assignment belongs to the expected vCenter
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # No entity reference given: assign to the vCenter itself,
        # identified by its instance UUID
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before re-raising, consistent with the rest of this module
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Lists the datacenters known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    matches = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            matches.append(entry['object'])
    return matches
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name
    '''
    found = get_datacenters(service_instance,
                            datacenter_names=[datacenter_name])
    if found:
        return found[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        datacenter = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return datacenter
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref,
                                                                name=dc_name)
    # Clusters live under the datacenter's hostFolder; descend one folder
    # level to reach them
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    matches = [entry['object'] for entry in
               get_mors_with_properties(service_instance,
                                        vim.ClusterComputeResource,
                                        container_ref=dc_ref,
                                        property_list=['name'],
                                        traversal_spec=traversal_spec)
               if entry['name'] == cluster]
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx) to create the cluster with.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Reconfigures an existing cluster and waits for the operation to finish.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx) describing the changes.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        reconfig_task = cluster_ref.ReconfigureComputeResource_Task(
            cluster_spec, modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(reconfig_task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Lists the clusters known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Lists the datastore clusters (vim.StoragePod) known to the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Lists the datastores known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dict mapping each datastore name to its basic information:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {datastore: list_datastore_full(service_instance, datastore)
            for datastore in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts.
    Sizes are reported in MiB; usage is a percentage.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # The API reports sizes in bytes; convert to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against a zero-capacity datastore (e.g. inaccessible) which
    # would otherwise raise ZeroDivisionError
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies as "'vim.HostSystem:host-123'"; strip the
        # quotes and keep the moid after the first colon
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Return the first object with a matching name, or None
    return next((obj for obj in container.view if obj.name == obj_name), None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Return the first object with a matching managed object id, or None
    return next((obj for obj in container.view if obj._moId == obj_moid), None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        # Kick off a datastore-browser search task rooted at
        # '[<datastore>] <directory>'
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # A datastore with no matching files is skipped, not an error
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible. Supported
        types: vim.HostSystem, vim.ClusterComputeResource, vim.Datacenter,
        vim.StoragePod, or the root 'Datacenters' vim.Folder.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        # Translate disk canonical names into the names of the VMFS
        # datastores whose extents live on those disks
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    current_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", current_name,
              new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns the storage system (vim.HostStorageSystem) of an ESXi host.

    service_instance
        The Service Instance Object.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host, used for logging/errors. Retrieved if not given.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return entries[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns the partition information (vim.HostDiskPartitionInfo) for a
    single device path.
    '''
    try:
        infos = storage_system.RetrieveDiskPartitionInfo(
            devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', infos[0])
    return infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # BUGFIX: logging uses %-style interpolation; the previous '{0}'
    # placeholder was never expanded, so the value was not logged
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's storage system (vim.HostStorageSystem). Retrieved from
        the host if not provided. Default is None.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    # Compute a partition spec that turns the disk's free space into a new
    # vmfs partition spanning the remainder of the disk
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return entries[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The datastore must have at least one attached host,
    through which the removal is performed.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # The removal is issued through the datastore system of the first
    # attached host
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    NOTE: if neither ``get_all_hosts`` nor ``host_names`` is set, an empty
    list is returned.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            # Keep only hosts whose parent is the requested cluster
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Maps every scsi address on an ESXi host to its vim.ScsiLun object.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref,
                                                                name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # scsi address -> lun key, then lun key -> lun object, composed below.
    scsi_addr_to_lun_key = _get_scsi_address_to_lun_key_map(
        service_instance, host_ref, storage_system, hostname)
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    scsi_addr_to_lun = {}
    for scsi_addr, lun_key in six.iteritems(scsi_addr_to_lun_key):
        scsi_addr_to_lun[scsi_addr] = lun_by_key[lun_key]
    return scsi_addr_to_lun
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns the vim.HostScsiDisk objects of an ESXi host, filtered by
    canonical name and/or scsi address.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            return []
    service_instance = get_service_instance_from_managed_object(host_ref,
                                                                name=hostname)
    storage_system = get_storage_system(service_instance, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # Translate the requested scsi addresses into lun keys so disks can
        # be matched by key in addition to canonical name.
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
            service_instance, host_ref, storage_system, hostname)
        disk_keys = [lun_key for scsi_addr, lun_key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    matching_disks = []
    for lun in get_all_luns(host_ref, storage_system):
        if not isinstance(lun, vim.HostScsiDisk):
            continue
        if get_all_disks \
                or (disk_ids and lun.canonicalName in disk_ids) \
                or lun.key in disk_keys:
            matching_disks.append(lun)
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in matching_disks])
    return matching_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns the partition information of a disk on an ESXi host.

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are retrieved

    storage_system
        The ESXi host's storage system. Retrieved when omitted.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    luns = props.get('storageDeviceInfo.scsiLun')
    if not luns:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname, len(luns),
        ', '.join([lun.canonicalName for lun in luns])
    )
    # Resolve the device path of the requested disk from its canonical name.
    disks = [lun for lun in luns
             if isinstance(lun, vim.HostScsiDisk) and
             lun.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by writing an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.

    raise
        VMwareObjectRetrievalError when the host's devices or the requested
        disk cannot be found
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # Query the scsi luns via the host's storage system so the device path
    # of the requested disk can be resolved from its canonical name.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Only scsi disks are eligible; other lun types are ignored.
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns the vim.VsanHostDiskMapping objects (disk groups) of an ESXi
    host, filtered by the canonical names of their cache disks.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    if not vsan_storage_info.diskMapping:
        return []
    # Each mapping has exactly one ssd (cache) disk; match on its name.
    disk_groups = []
    for mapping in vsan_storage_info.diskMapping:
        if get_all_disk_groups or mapping.ssd.canonicalName in cache_disk_ids:
            disk_groups.append(mapping)
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that a disk group is backed by the expected cache disk and
    capacity disks; raises ArgumentValueError when the layout differs.
    '''
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    # Compare order-insensitively: only membership matters.
    actual = sorted([d.canonicalName for d in disk_group.nonSsd])
    expected = sorted(capacity_disk_ids)
    if actual != expected:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual, expected))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, otherwise returns None

    Note: only the first cache configuration entry is reported (host caches
    on multiple datastores are not yet supported).

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # No manager supplied: traverse from the host to its cache
        # configuration manager and read cacheConfigurationInfo in one query.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        # A manager was supplied: read its properties directly.
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host. Returns True on
    success.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method

    raise
        VMwareObjectRetrievalError when the host has no cache configuration
        manager
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        # Look the cache configuration manager up from the host itself.
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the configuration task as finished.
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    # Thin wrapper: delegates the inventory walk for vim.HostSystem
    # objects to the module-level list_objects helper.
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available; when
        omitted the search starts at the inventory root folder

    get_all_resource_pools
        Boolean; when True every resource pool found is returned

    return
        List of vim.ResourcePool managed object references

    raise
        VMwareObjectRetrievalError when no matching resource pool is found
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the requested names; the previous message formatted the
        # (always empty at this point) result list and rendered 'names=[]'.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={0} get_all={1} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    # Thin wrapper: delegates the inventory walk for vim.ResourcePool
    # objects to the module-level list_objects helper.
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    # Thin wrapper: delegates the inventory walk for vim.Network
    # objects to the module-level list_objects helper.
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    # Thin wrapper: delegates the inventory walk for vim.VirtualMachine
    # objects to the module-level list_objects helper.
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    # Thin wrapper: delegates the inventory walk for vim.Folder
    # objects to the module-level list_objects helper.
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    # Thin wrapper: delegates the inventory walk for
    # vim.DistributedVirtualSwitch objects to list_objects.
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    # Thin wrapper: delegates the inventory walk for vim.VirtualApp
    # objects to the module-level list_objects helper.
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    # Thin wrapper: delegates the inventory walk for
    # vim.dvs.DistributedVirtualPortgroup objects to list_objects.
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    Polls ``task.info`` roughly once per second until the task leaves the
    ``running``/``queued`` states, translating pyVmomi faults into salt
    exceptions along the way. Returns ``task.info.result`` on success; on
    failure re-raises the task's stored error translated into the matching
    salt exception.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        How often (in polls, i.e. roughly in seconds) the waiting message is
        logged. Note: the task itself is polled about once per second
        regardless of this value. Defaults to ``1``.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Throttle the progress message to every ``sleep_seconds`` polls.
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep to the next whole-second boundary since start_time, so the
        # task is polled approximately once per wall-clock second.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault so the
        # handlers below can translate it into the matching salt exception
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first localized fault message when present.
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    raise
        VMwareObjectRetrievalError when no machine matches the name;
        VMwareMultipleObjectsError when the name is ambiguous
    '''
    if datacenter and not parent_ref:
        # Scope the search to the given datacenter.
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # The previous message was built from adjacent string literals with
        # a missing separator and rendered as '...with thesame name...'.
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the same name, '
            'please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns the vm Folder managed object a virtual machine lives in or
    should be created in.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary; the optional ``folder`` key names an explicit
        target folder

    base_vm_name
        Existing virtual machine name (for cloning); when given, the parent
        folder of that machine is returned

    raise
        ArgumentValueError when neither a base vm, a placement folder, nor a
        datacenter is available to derive the folder from
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Clone case: reuse the folder of the source virtual machine.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this case fell through and raised an opaque NameError
        # on the unbound local 'folder_object'.
        raise salt.exceptions.ArgumentValueError(
            'Either a base vm name, a placement folder or a datacenter is '
            'required to retrieve the folder')
    return folder_object
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit; returns a dict in the
    form ``{'size': <int>, 'unit': 'KB'}`` (the previous docstring wrongly
    claimed a bare long integer was returned).

    unit
        Unit of the size, one of ``GB``, ``MB`` or ``KB`` (case-insensitive);
        Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    raise
        ArgumentValueError when the unit is missing or unsupported
    '''
    # Multipliers to KB; vCenter needs a plain integer (long) value.
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    try:
        multiplier = multipliers[unit.lower()]
    except KeyError:
        # Mention the offending unit; the old message claimed the unit was
        # "not specified" even when an unsupported one was given.
        raise salt.exceptions.ArgumentValueError(
            'The unit \'{0}\' is not supported'.format(unit))
    return {'size': int(size * multiplier), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers a virtual machine on or off and waits for the operation to
    complete.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    # Pick the power operation up-front; the on/off branches are otherwise
    # identical.
    if action == 'on':
        power_op = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_op = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_op()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates a virtual machine from a config spec and waits for the creation
    task to finish.

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Only pass a placement host when a real HostSystem object was given.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)
    '''
    # Only pass a placement host when one was supplied.
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Reconfigures a virtual machine with the given config spec and waits for
    the reconfiguration task to complete.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter finishes the reconfiguration, then hand back the
    # task result.
    return wait_for_task(task, vm_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine, removing it from the inventory together
    with its files, and waits for the destroy task to complete.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the destroy task as finished.
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory. The virtual
    machine's files are left on the datastore (use ``delete_vm`` to destroy
    them as well). The previous docstring was copy-pasted from delete_vm
    and wrongly claimed the machine was destroyed.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, consistent with the other helpers in this
        # module (the original silently dropped the traceback here).
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
convert_to_kb
|
python
|
def convert_to_kb(unit, size):
'''
Converts the given size to KB based on the unit, returns a long integer.
unit
Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB
size
Number which represents the size
'''
if unit.lower() == 'gb':
# vCenter needs long value
target_size = int(size * 1024 * 1024)
elif unit.lower() == 'mb':
target_size = int(size * 1024)
elif unit.lower() == 'kb':
target_size = int(size)
else:
raise salt.exceptions.ArgumentValueError('The unit is not specified')
return {'size': target_size, 'unit': 'KB'}
|
Converts the given size to KB based on the unit, returns a long integer.
unit
Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB
size
Number which represents the size
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L3454-L3472
| null |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Gate loading of this utils module on the availability of the pyVmomi
    bindings. Returns True when pyVmomi imported, otherwise a
    ``(False, reason)`` tuple.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Fall back to the standard HTTPS transport when not specified.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    if esxi_host:
        # 'host' is a vCenter server; route the command to the named ESXi
        # host behind it via -h.
        target = ' -s {0} -h {1} -u {2} -p \'{3}\''.format(
            host, esxi_host, user, pwd)
    else:
        # Connecting directly to an ESXi server; 'host' is the machine we
        # are manipulating.
        target = ' -s {0} -u {1} -p \'{2}\''.format(host, user, pwd)
    esx_cmd += target + ' --protocol={0} --portnumber={1} {2}'.format(
        protocol, port, cmd)

    # output_loglevel='quiet' suppresses command/output logging -- the
    # command line contains the password.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        Login user name. Mandatory when mechanism is ``userpass``.

    password
        Login password. Mandatory when mechanism is ``userpass``.

    protocol
        Connection protocol (typically ``https``).

    port
        Connection port (typically ``443``).

    mechanism
        Either ``userpass`` or ``sspi`` (Kerberos via GSSAPI).

    principal
        Kerberos service principal. Mandatory when mechanism is ``sspi``.

    domain
        Kerberos user domain. Mandatory when mechanism is ``sspi``.

    Raises CommandExecutionError for bad/missing parameters and
    VMwareConnectionError when the connection itself fails.
    '''
    log.trace('Retrieving new service instance')
    # token is only populated for the 'sspi' mechanism; it stays None for
    # plain username/password logins.
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Older pyVmomi releases do not accept the b64token/mechanism
        # keyword arguments; surface a clear upgrade hint and re-raise.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First fallback: retry with certificate verification disabled
            # when the failure is a certificate validation error
            # (self-signed certs are common on ESXi hosts).
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Second fallback: force TLSv1 with verification disabled.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is torn down when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Fetch the VMware guest customization spec with the given name, for use
    when customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    return si.content.customizationSpecManager.GetCustomizationSpec(
        name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Return the first managed object of the given type and name, located via
    a recursive container view over the whole inventory; None if no match.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    content = get_inventory(si)
    container = content.viewManager.CreateContainerView(
        content.rootFolder, [obj_type], True)
    # Generator stops at the first match, like the original early return.
    return next((entity for entity in container.view
                 if entity.name == obj_name), None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # pyVim caches one service instance per process; reuse it when it still
    # points at the same host:port and we are not running inside a proxy.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Session expired on the server side: reconnect once from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Build a new SOAP stub pointed at a different endpoint path, reusing the
    host and session cookie of an existing connection.

    service_instance
        The Service Instance whose connection is reused.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # Python 2.7.9+ enforces stricter SSL handshaking; turn off hostname
    # checking and client-side cert verification for the new stub.
    ssl_context = None
    if sys.version_info[:3] > (2, 7, 8):
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE

    old_stub = service_instance._stub
    hostname = old_stub.host.split(':')[0]
    # The session cookie is the quoted token inside the cookie header.
    session_cookie = old_stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=ssl_context)
    new_stub.cookie = old_stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Return a vim.ServiceInstance that shares the SOAP stub (connection) of
    the given managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of the managed object, used only for log messages. Optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    service_instance = vim.ServiceInstance('ServiceInstance')
    # Reuse the managed object's live connection for the new instance.
    service_instance._stub = mo_ref._stub
    return service_instance
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises VMwareApiError / VMwareRuntimeError when the disconnect fails, so
    callers only have to deal with salt exception types.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Tell whether the connection target is a vCenter Server (returns True)
    or a standalone ESXi host (returns False).

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # vCenter reports 'VirtualCenter'; a standalone host reports 'HostAgent'.
    known_api_types = {'VirtualCenter': True, 'HostAgent': False}
    if api_type not in known_api_types:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
    return known_api_types[api_type]
def get_service_info(service_instance):
    '''
    Return the 'about' information of the connected vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        about_info = service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return about_info
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no switch with that name exists
    '''
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    return next((dvs for dvs in container.view if dvs.name == dvs_name),
                None)
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    Returns the base64-encoded token on success; raises ImportError when the
    gssapi library is unavailable and CommandExecutionError when no token
    could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    # NOTE(review): in_token is never updated inside the loop, so this loop
    # effectively performs a single ctx.step(None): it either returns the
    # first out_token, breaks when the context is established, or raises.
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    # Context was established without the server ever handing us a token.
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Only a standalone ESXi host (apiType 'HostAgent') exposes exactly one
    # HostSystem whose hardware maps onto minion grains.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # System / BIOS / product identification grains.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; converted to MB here. NOTE(review):
            # without 'from __future__ import division' the result is an int
            # on Python 2 and a float on Python 3.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            # Network grains: one entry per VMkernel NIC (vmk*).
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host + '.' + domain, or just host if no domain is set.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # NOTE(review): the container view is only dereferenced here, not
        # Destroy()ed as in get_content -- confirm whether that leaks a view
        # on the server.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (retrieved content) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Return the root folder of the connected vCenter/ESXi inventory.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    log.trace('Retrieving root folder')
    try:
        root_folder = service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return root_folder
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting point for the
    # filter is the container_ref passed in the function
    obj_ref = container_ref
    # Track whether we created a container view ourselves; if so, we are
    # also responsible for destroying it after retrieval.
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view we created above (only when we created one)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose property matches the
    given value (either by the property itself or by the stringified object
    id); None when nothing matches.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    candidates = get_mors_with_properties(
        service_instance, object_type, property_list=[property_name],
        container_ref=container_ref)
    for candidate in candidates:
        # The object id is the repr of the MOR with quotes stripped, e.g.
        # vim.HostSystem:host-12.
        candidate_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == candidate_id:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Return a list of dicts, one per retrieved managed object, mapping each
    retrieved property name to its value plus the key 'object' holding the
    managed object reference itself.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    retrieval_args = [service_instance, object_type]
    retrieval_kwargs = {'property_list': property_list,
                        'container_ref': container_ref,
                        'traversal_spec': traversal_spec,
                        'local_properties': local_properties}
    # Retry once on transport hiccups: stale keep-alive connections show up
    # as BadStatusLine or as an EPIPE IOError.
    try:
        content = get_content(*retrieval_args, **retrieval_kwargs)
    except BadStatusLine:
        content = get_content(*retrieval_args, **retrieval_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*retrieval_args, **retrieval_kwargs)

    object_list = []
    for obj in content:
        entry = {prop.name: prop.val for prop in obj.propSet}
        entry['object'] = obj.obj
        object_list.append(entry)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Retrieve specific properties of a managed object with a single,
    optimized property-collector call.

    mo_ref
        The managed object reference.

    properties
        List of property names of the managed object to retrieve.

    Raises VMwareApiError when the properties could not be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First fetch the object's name, used only to build error/log messages.
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    entries = get_mors_with_properties(service_instance,
                                       type(mo_ref),
                                       container_ref=mo_ref,
                                       property_list=properties,
                                       local_properties=True)
    if not entries:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return entries[0]
def get_managed_object_name(mo_ref):
    '''
    Return the ``name`` property of a managed object, or None when the
    object has no name.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Instantiate and return the virtual network adapter device matching the
    given adapter type name.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000' or 'e1000e'.

    Raises ValueError for any other name.
    '''
    known_adapters = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type not in known_adapters:
        raise ValueError('An unknown network adapter object type name.')
    return known_adapters[adapter_type]()
def get_network_adapter_object_type(adapter_object):
    '''
    Return the adapter type name for a virtual network adapter object.

    adapter_object
        The adapter device object from which to obtain the type name.

    Raises ValueError when the object is none of the known adapter types.
    '''
    # Check order is significant and preserved from the original:
    # vmxnet2/vmxnet3 before vmxnet, and e1000e before e1000
    # (NOTE(review): presumably because of subclassing in pyVmomi --
    # keep this ordering).
    ordered_checks = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for adapter_class, type_name in ordered_checks:
        if isinstance(adapter_object, adapter_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Return the distributed virtual switches (DVSs) in a datacenter, either
    all of them or only those whose names are listed.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # DVSs live under the datacenter's network folder: traverse into the
    # folder, then into its children.
    folder_children_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_children_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    entries = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualSwitch,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    return [entry['object'] for entry in entries
            if get_all_dvss or (dvs_names and entry['name'] in dvs_names)]
def get_network_folder(dc_ref):
    '''
    Return the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises VMwareObjectRetrievalError when the folder cannot be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if folders:
        return folders[0]['object']
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Network folder in datacenter \'{0}\' wasn\'t retrieved'
        ''.format(dc_name))
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Create a distributed virtual switch (DVS) in a datacenter and wait for
    the creation task to finish.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        When omitted, a minimal spec carrying only the name is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    dvs_create_spec = dvs_create_spec or vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
    # The dvs_name argument always wins over any name set in the spec.
    dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Apply a config spec to an existing distributed virtual switch and wait
    for the reconfiguration task to finish.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        reconfig_task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(reconfig_task, dvs_name, six.text_type(reconfig_task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Turn network I/O control (NIOC) on or off for a DVS.

    dvs_ref
        Reference to the DVS to change.

    enabled
        Boolean flag; True enables network resource management, False
        disables it.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Retrieve distributed virtual portgroups (dvportgroups).

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        Names of the portgroups to return. Default is None.

    get_all_portgroups
        When True, every portgroup under the parent is returned.
        Default is False.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Hop from the datacenter through its network folder to the children
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    matches = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            matches.append(entry['object'])
    return matches
def get_uplink_dvportgroup(dvs_ref):
    '''
    Return the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference

    Raises a VMwareObjectRetrievalError when no portgroup carrying the
    ``SYSTEM/DVS.UPLINKPG`` tag is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        tags = entry['tag']
        # The uplink portgroup is identified by a well-known system tag
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            return entry['object']
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
def create_dvportgroup(dvs_ref, spec):
    '''
    Create a distributed virtual portgroup on a distributed virtual switch
    (dvs) and wait for the creation task to finish.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in log message: 'portgrouo' -> 'portgroup'
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Destroy a distributed virtual portgroup and wait for the removal task
    to finish.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Return standard-switch networks visible under a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        When True, all networks under the parent are returned.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks live under the datacenter's network folder
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    found = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            found.append(entry['object'])
    return found
def list_objects(service_instance, vim_object, properties=None):
    '''
    Return the names of all objects of a given vim type.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The vim type of the objects to list (e.g. vim.Datacenter).

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``['name']``; only the
        ``name`` property is used to build the result.
    '''
    props = properties if properties is not None else ['name']
    return [entry['name'] for entry in
            get_mors_with_properties(service_instance, vim_object, props)]
def get_license_manager(service_instance):
    '''
    Return the license manager of a service instance.

    service_instance
        The Service Instance Object from which to obtain the license
        manager.
    '''
    log.debug('Retrieving license manager')
    try:
        manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return manager
def get_license_assignment_manager(service_instance):
    '''
    Return the license assignment manager of a service instance.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises a VMwareObjectRetrievalError when the manager is not exposed.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    if not manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return manager
def get_licenses(service_instance, license_manager=None):
    '''
    Return the licenses registered on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Register a license on a service instance and return it.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description becomes the license label shown by the vSphere client
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        added_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return added_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required (a missing name raises
        ArgumentValueError).
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # entity_name is guaranteed non-empty here (checked above), so the
        # previous inner 'if entity_name' test was dead code
        check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
                license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if entity_type == 'uuid' and len(assignments) > 1:
        # Typo fixed in log message: 'Unexpectectedly' -> 'Unexpectedly'
        log.trace('Unexpectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before translating, consistent with every other handler in
            # this module (these two branches previously skipped logging)
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Return the names of all datacenters on the given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Return datacenter objects in a vCenter, optionally filtered by name.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    matches = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            matches.append(entry['object'])
    return matches
def get_datacenter(service_instance, datacenter_name):
    '''
    Return the vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name

    Raises a VMwareObjectRetrievalError when no datacenter matches.
    '''
    found = get_datacenters(service_instance,
                            datacenter_names=[datacenter_name])
    if not found:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return found[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Create a datacenter under the root folder and return the new object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Return a cluster from a datacenter by name.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved

    Raises a VMwareObjectRetrievalError when the cluster doesn't exist.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's host folder
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    for entry in get_mors_with_properties(si,
                                          vim.ClusterComputeResource,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if entry['name'] == cluster:
            return entry['object']
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Cluster \'{0}\' was not found in datacenter '
        '\'{1}\''.format(cluster, dc_name))
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Create a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Reconfigure an existing cluster and wait for the task to finish.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx) to apply.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Return the names of all clusters on the given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Return the names of all datastore clusters on the service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Return the names of all datastores on the given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Return basic information for every datastore on the service instance.

    The per-datastore value contains:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    Capacity/free/used are reported in MiB; usage is a percentage.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against a zero-capacity datastore (e.g. inaccessible/unmounted),
    # which previously raised ZeroDivisionError
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key looks like "vim.HostSystem:host-123"; keep the moid part
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name, or None
    when no object matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # isn't leaked for the lifetime of the session
        container.Destroy()
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id, or None
    when no object matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
        return None
    finally:
        # Container views are server-side objects; destroy the view so it
        # isn't leaked for the lifetime of the session
        container.Destroy()
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Search path uses the standard '[datastore] directory' notation
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore that doesn't contain the directory is skipped
            # (best-effort search across all requested datastores)
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all datastores visible from the
        reference, ignoring the name/disk filters.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        # The backing-disk filter needs the host's storage system, so it
        # only makes sense for a host reference
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Merge the disk-derived names into the explicit name filter
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Rename a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Return a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance used for the property retrieval.

    host_ref
        Reference to the host whose storage system is wanted.

    hostname
        Optional host name used for logging/errors; looked up from the
        reference when omitted.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Hop from the host to its configManager.storageSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       property_list=['systemFile'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return entries[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Return the vim.HostDiskPartitionInfo for a single device path.

    storage_system
        The vim.HostStorageSystem used to query the disk.

    device_path
        Path of the device whose partition layout is wanted.
    '''
    try:
        infos = storage_system.RetrieveDiskPartitionInfo(
            devicePath=[device_path])
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Exactly one device path was requested, so use the first result
    log.trace('partition_info = %s', infos[0])
    return infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem of the host containing the disk.

    device_path
        The device path of the disk the partition is computed for.

    partition_info
        The current vim.HostDiskPartitionInfo of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition at the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use %-style lazy formatting; a '{0}' placeholder would be logged
    # literally because logging does not apply str.format to its args
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore spanning the free remainder of a scsi disk and
    returns the new vim.Datastore reference.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDisk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Optional; retrieved from the host
        when not provided.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    # Compute a partition spec that turns the free space at the end of the
    # disk into a vmfs partition
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. Optional; looked up from ``host_ref`` when omitted.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.HostSystem,
        path='configManager.datastoreSystem',
        skip=False)
    entries = get_mors_with_properties(service_instance,
                                      vim.HostDatastoreSystem,
                                      property_list=['datastore'],
                                      container_ref=host_ref,
                                      traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return entries[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore via the datastore system of one of its attached hosts.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host can perform the removal; use the first one
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    properties = ['name']
    host_names = host_names or []
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # The cluster membership check below needs each host's parent
            properties.append('parent')
    else:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [entry['name'] for entry in hosts])
    filtered_hosts = []
    for entry in hosts:
        # A host qualifies if it belongs to the requested cluster (when one
        # was given) and either all hosts were requested or its name matches
        if cluster_name:
            parent = entry['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or entry['name'] in host_names:
            filtered_hosts.append(entry['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises VMwareObjectRetrievalError when the storage device info, the
    multipath info, or the luns cannot be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns the list of all vim.ScsiLun objects of a host, or an empty list
    when the host reports none.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if storage_system is None:
        storage_system = get_storage_system(si, host_ref, hostname)
    # scsi address -> lun key
    lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        si, host_ref, storage_system, hostname)
    # lun key -> vim.ScsiLun object
    lun_by_key = {lun.key: lun
                  for lun in get_all_luns(host_ref, storage_system, hostname)}
    # Join the two maps on the lun key
    return {scsi_addr: lun_by_key[lun_key]
            for scsi_addr, lun_key in six.iteritems(lun_key_by_scsi_addr)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Nothing to filter on means nothing can match
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns the vim.HostDiskPartitionInfo with all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the requested disk among the host's scsi luns
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the requested disk among the host's scsi luns
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disk groups
    in a ESXi host, filtered by the canonical names of their cache disks.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # No cache disk ids to filter on means nothing can match
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by the canonical name of its (single) ssd
    # cache disk
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    # The cache disk of a disk group is unique, so its id must match exactly
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    # Capacity disks are compared as sorted sets of canonical names
    actual_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_ids = sorted(capacity_disk_ids)
    if actual_ids != expected_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_ids, expected_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the host cache configuration info (first entry of
    ``cacheConfigurationInfo``) if the host cache is configured on the
    specified host, otherwise returns None.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Traverse from the host to its cacheConfigurationManager
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host; returns True on success.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the async configuration task completes
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts (vim.HostSystem) associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean

    return
        List of vim.ResourcePool managed object references

    Raises VMwareObjectRetrievalError when no matching resource pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Restrict the search to the datacenter when one is given, otherwise
    # search from the inventory root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Fix: report the requested names, not the (empty) result list
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools (vim.ResourcePool) associated with a
    given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Returns a list of networks (vim.Network) associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Returns a list of VMs (vim.VirtualMachine) associated with a given
    service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Returns a list of folders (vim.Folder) associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches
    (vim.DistributedVirtualSwitch) associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Returns a list of vApps (vim.VirtualApp) associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        portgroups.
    '''
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    Polls ``task.info`` roughly once per second until the task leaves the
    ``running``/``queued`` states; returns ``task.info.result`` on success and
    re-raises the task's fault as a salt exception on failure.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # sleep_seconds only controls how often the wait is logged; the
        # actual poll interval is fixed at one second (see sleep below)
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary relative to start_time
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; raise the stored fault so it can be
        # translated into the appropriate salt exception below
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError when no VM matches and
    VMwareMultipleObjectsError when several VMs share the name.
    '''
    if datacenter and not parent_ref:
        # Restrict the search to the given datacenter
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Fix: the original omitted the comma between the two literals, so
        # they were concatenated into '...with thesame name...'; keeping them
        # as separate list elements lets ' '.join insert the space
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # When cloning, place the new VM next to the base VM: use its parent
        # folder.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Fixed: previously no branch matched and the function raised a
        # confusing UnboundLocalError on the return below; fail explicitly.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The folder object could not be determined: no base vm, '
            'folder placement or datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        # Strictest placement: a named host. Use that host's resource pool.
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The host does not expose 'resourcePool' directly; traverse
            # parent -> cluster -> resourcePool instead.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: this message previously formatted placement['host'],
            # which may not exist in this branch (KeyError) and misnamed the
            # duplicated object.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by its name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    # The original on/off branches duplicated identical error handling;
    # dispatch to the right power method instead and handle errors once.
    if action == 'on':
        power_method = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_method = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_method()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        # Block until the power task completes.
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Create a virtual machine from a config spec and wait for the creation
    task to finish.

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    create_kwargs = {'pool': resourcepool_object}
    # Only pin the VM to a host when a real HostSystem reference was given.
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Register a virtual machine in the inventory using the given vmx file.
    On success returns the vim.VirtualMachine managed object reference.

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host
        Placement host of the virtual machine, vim.HostSystem object
    '''
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    # Pass the placement host only when one was supplied.
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Reconfigure a virtual machine with the given config spec and wait for
    the reconfiguration task to finish.

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfigure task completes, then return its result.
    return wait_for_task(reconfig_task, vm_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroy a virtual machine and wait for the destroy task to finish.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(destroy_task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine, removing it from the inventory without
    deleting its files from the datastore.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed: the docstring and log message previously said "Destroying",
    # which is what delete_vm does; UnregisterVM only removes the VM from
    # the inventory.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Fixed: log the exception before re-raising, for consistency with
        # every sibling function in this module.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
power_cycle_vm
|
python
|
def power_cycle_vm(virtual_machine, action='on'):
'''
Powers on/off a virtual machine specified by it's name.
virtual_machine
vim.VirtualMachine object to power on/off virtual machine
action
Operation option to power on/off the machine
'''
if action == 'on':
try:
task = virtual_machine.PowerOn()
task_name = 'power on'
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
elif action == 'off':
try:
task = virtual_machine.PowerOff()
task_name = 'power off'
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
else:
raise salt.exceptions.ArgumentValueError('The given action is not supported')
try:
wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
except salt.exceptions.VMwareFileNotFoundError as exc:
raise salt.exceptions.VMwarePowerOnError(' '.join([
'An error occurred during power',
'operation, a file was not found: {0}'.format(exc)]))
return virtual_machine
|
Powers on/off a virtual machine specified by it's name.
virtual_machine
vim.VirtualMachine object to power on/off virtual machine
action
Operation option to power on/off the machine
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L3475-L3523
|
[
"def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):\n '''\n Waits for a task to be completed.\n\n task\n The task to wait for.\n\n instance_name\n The name of the ESXi host, vCenter Server, or Virtual Machine that\n the task is being run on.\n\n task_type\n The type of task being performed. Useful information for debugging purposes.\n\n sleep_seconds\n The number of seconds to wait before querying the task again.\n Defaults to ``1`` second.\n\n log_level\n The level at which to log task information. Default is ``debug``,\n but ``info`` is also supported.\n '''\n time_counter = 0\n start_time = time.time()\n log.trace('task = %s, task_type = %s', task, task.__class__.__name__)\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n while task_info.state == 'running' or task_info.state == 'queued':\n if time_counter % sleep_seconds == 0:\n msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n time.sleep(1.0 - ((time.time() - start_time) % 1.0))\n time_counter += 1\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. 
Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n if task_info.state == 'success':\n msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n # task is in a successful state\n return task_info.result\n else:\n # task is in an error state\n try:\n raise task_info.error\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.fault.SystemError as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareSystemError(exc.msg)\n except vmodl.fault.InvalidArgument as exc:\n log.exception(exc)\n exc_message = exc.msg\n if exc.faultMessage:\n exc_message = '{0} ({1})'.format(exc_message,\n exc.faultMessage[0].message)\n raise salt.exceptions.VMwareApiError(exc_message)\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    # NOTE(review): the command line below is assembled by string
    # interpolation and executed through a shell; a password (or credstore
    # path) containing a single quote breaks the quoting and could allow
    # command injection. Consider quoting each argument (e.g. shlex.quote).
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)

    # output_loglevel='quiet' keeps the password out of Salt's command logs.
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')

    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.
    '''
    log.trace('Retrieving new service instance')
    token = None
    # Validate the credentials required by the chosen login mechanism before
    # attempting to connect.
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # NOTE(review): exc.message only exists on Python 2; on Python 3 this
        # branch itself raises AttributeError. Also, if the message does not
        # match, execution falls through with service_instance unbound.
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First fallback: retry with certificate verification disabled
            # when the failure was an SSL verification error.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            # Second fallback: force a TLSv1 context with verification off.
            if 'certificate verify failed' in six.text_type(exc):
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is closed when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of customizing a clone

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Fixed: destroy the view to release the server-side session
        # resources; the original implementation leaked it.
        container.DestroyView()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Reuse pyVim's cached service instance when it targets the same host.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: reconnect from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    # Reuse the host and session cookie of the existing connection; note
    # that _stub and cookie are private pyVmomi attributes.
    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    session_cookie = stub.cookie.split('"')[1]
    # Propagate the vCenter session cookie into the request context so the
    # new stub is authenticated as the same session.
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
    '''
    # NOTE(review): the default '<unnamed>' is truthy, so this branch only
    # runs when an empty name is explicitly passed -- confirm intended.
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a fresh ServiceInstance that shares the managed object's
    # underlying SOAP stub (and therefore its authenticated session).
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Close the connection to the vCenter server or ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        message = 'Not enough permissions. Required privilege: ' \
                  '{}'.format(exc.privilegeId)
        raise salt.exceptions.VMwareApiError(message)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Return True when the connection is made to a vCenter Server and False
    when it is made to an ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' means vCenter; 'HostAgent' means a standalone ESXi.
    api_type_map = {'VirtualCenter': True, 'HostAgent': False}
    if api_type not in api_type_map:
        raise salt.exceptions.VMwareApiError(
            'Unexpected api type \'{0}\' . Supported types: '
            '\'VirtualCenter/HostAgent\''.format(api_type))
    return api_type_map[api_type]
def get_service_info(service_instance):
    '''
    Return the service content's 'about' information of the vCenter or
    ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        about_info = service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return about_info
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no DVS with that name exists
    '''
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        try:
            for item in container.view:
                if item.name == dvs_name:
                    return item
        finally:
            # Fixed: destroy the view to release server-side resources;
            # the original implementation leaked it.
            container.DestroyView()
    return None
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None if no portgroup matches the name

    NOTE(review): this helper is currently byte-identical to
    ``_get_dvs_portgroup`` -- it matches purely by name and does not restrict
    the result to *uplink* portgroups. Presumably it should filter on the
    DVS uplink portgroup configuration; confirm intent before relying on it.
    '''
    # Linear scan over all portgroups of the DVS; first name match wins.
    for portgroup in dvs.portgroup:
        if portgroup.name == portgroup_name:
            return portgroup
    return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Returns the negotiated token, base64-encoded. Raises ``ImportError``
    when the optional ``gssapi`` library is not available and
    ``CommandExecutionError`` when no token could be negotiated.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    # Kerberos service principal of the form principal/host@DOMAIN.
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # Step the GSSAPI negotiation state machine; the first step is made
        # with no input token.
        out_token = ctx.step(in_token)
        if out_token:
            # A token was produced: return it base64-encoded (converted to
            # bytes first on PY3).
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # NOTE(review): in_token is never reassigned inside this loop, so a
        # step that yields no output token always ends up raising here.
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    Returns an empty dict when the service instance is not a HostAgent
    (i.e. not connected directly to an ESXi host).

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        # A HostAgent connection has exactly the one host; grab it through a
        # container view rooted at the inventory root folder.
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # System/BIOS/CPU identification grains.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            # OS-level grains come from the product info of the host.
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; grains report MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            # Network grains: one entry per virtual NIC, keyed on device name.
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host + '.' + domain, omitting the dot when no domain.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the reference to the container view.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the full content (inventory) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Return the inventory root folder of a vCenter/ESXi host.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    log.trace('Retrieving root folder')
    try:
        root_folder = service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return root_folder
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.

    Raises ``VMwareApiError`` on permission or generic API faults and
    ``VMwareRuntimeError`` on vmodl runtime faults.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the
    # filter is the container_ref passed in the function
    obj_ref = container_ref
    # Tracks whether a container view was created here (and therefore must
    # be destroyed before returning).
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    # (all=True retrieves every property when no explicit list was given)
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view created above so it is not leaked server-side
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose given property matches
    ``property_value`` (or whose moid string equals it), or None.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match against.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. If not
        specified, the inventory rootFolder is searched.
    '''
    # Fetch every object of the requested type together with the single
    # property we need for the comparison.
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # The moid comparison allows callers to pass e.g. 'vm-42' directly.
        moid = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == moid:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Return a list of dicts, one per managed object of the given type. Each
    dict maps the retrieved property names to their values and additionally
    carries the managed object reference under the key ``object``.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    def _retrieve():
        return get_content(service_instance, object_type,
                           property_list=property_list,
                           container_ref=container_ref,
                           traversal_spec=traversal_spec,
                           local_properties=local_properties)

    # A dropped connection surfaces either as BadStatusLine or as an EPIPE
    # IOError; in both cases a single retry is attempted.
    try:
        content = _retrieve()
    except BadStatusLine:
        content = _retrieve()
    except IOError as err:
        if err.errno != errno.EPIPE:
            raise err
        content = _retrieve()

    object_list = []
    for retrieved in content:
        props = {prop.name: prop.val for prop in retrieved.propSet}
        props['object'] = retrieved.obj
        object_list.append(props)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Retrieve specific properties of a managed object with local
    property-collector calls and return them as a dict.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises ``VMwareApiError`` when no properties could be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # The object's name is fetched first, purely so the log/error messages
    # below can identify the object.
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    results = get_mors_with_properties(service_instance,
                                       type(mo_ref),
                                       container_ref=mo_ref,
                                       property_list=properties,
                                       local_properties=True)
    if not results:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return results[0]
def get_managed_object_name(mo_ref):
    '''
    Return the ``name`` property of a managed object, or None when the
    object has no name.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device object for the given
    adapter type name.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e'.

    Raises ``ValueError`` for any other adapter type name.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in adapter_classes:
        return adapter_classes[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Return the adapter type name for a virtual network adapter device
    object.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ``ValueError`` when the object matches none of the known types.
    '''
    # Order matters: more specific device classes are checked before the
    # classes they derive from (e.g. Vmxnet2/Vmxnet3 before Vmxnet).
    type_names = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for device_class, type_name in type_names:
        if isinstance(adapter_object, device_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Return the distributed virtual switches (DVSs) of a datacenter as a
    list of managed object references.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # DVSs live under the datacenter's network folder:
    # datacenter -> networkFolder -> childEntity.
    folder_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvs_refs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvs_refs.append(entry['object'])
    return dvs_refs
def get_network_folder(dc_ref):
    '''
    Retrieve the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises ``VMwareObjectRetrievalError`` when the folder can't be found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Step directly from the datacenter to its 'networkFolder' property.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folder_entries = get_mors_with_properties(service_instance,
                                              vim.Folder,
                                              container_ref=dc_ref,
                                              property_list=['name'],
                                              traversal_spec=traversal_spec)
    if folder_entries:
        return folder_entries[0]['object']
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Network folder in datacenter \'{0}\' wasn\'t retrieved'
        ''.format(dc_name))
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Create a distributed virtual switch (DVS) in a datacenter and wait for
    the creation task to complete.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a default spec is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    spec = dvs_create_spec if dvs_create_spec else vim.DVSCreateSpec()
    if not spec.configSpec:
        spec.configSpec = vim.VMwareDVSConfigSpec()
    # The requested DVS name always overrides whatever is in the spec.
    spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        create_task = netw_folder_ref.CreateDVS_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Block until vCenter reports the DVS creation as finished.
    wait_for_task(create_task, dvs_name, six.text_type(create_task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Reconfigure a distributed virtual switch with the given config spec and
    wait for the reconfiguration task to finish.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        reconfig_task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Block until the reconfiguration task completes.
    wait_for_task(reconfig_task, dvs_name,
                  six.text_type(reconfig_task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enable or disable network I/O control (NIOC) on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Return distributed virtual portgroups (dvportgroups) as a list of
    managed object references. The parent object can be either a datacenter
    or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises ``ArgumentValueError`` for any other parent type.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Portgroups of a datacenter are reached through its network folder:
        # datacenter -> networkFolder -> childEntity.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # A DVS lists its portgroups directly in its 'portgroup' property.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroup_refs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroup_refs.append(entry['object'])
    return portgroup_refs
def get_uplink_dvportgroup(dvs_ref):
    '''
    Return the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference

    Raises ``VMwareObjectRetrievalError`` when no uplink portgroup exists.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # Uplink portgroups carry the reserved 'SYSTEM/DVS.UPLINKPG' tag.
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Create a distributed virtual portgroup on a distributed virtual switch
    (dvs) and wait for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        create_task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(create_task, dvs_name, six.text_type(create_task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Log message typo fixed ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfiguration task completes.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Remove a distributed virtual portgroup and wait for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        destroy_task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(destroy_task, pg_name, six.text_type(destroy_task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Return standard-switch networks of a datacenter as a list of managed
    object references.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.

    Raises ``ArgumentValueError`` when parent_ref is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Networks live under the datacenter's network folder:
    # datacenter -> networkFolder -> childEntity.
    folder_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_spec])
    network_refs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            network_refs.append(entry['object'])
    return network_refs
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The vim object type for which to obtain information.

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    # Docstring previously documented a nonexistent ``object_type`` param;
    # the actual parameter is ``vim_object``.
    if properties is None:
        properties = ['name']

    # Only the 'name' of each retrieved object is returned, even when
    # additional properties were requested (they are fetched but unused).
    item_list = get_mors_with_properties(service_instance, vim_object, properties)
    return [item['name'] for item in item_list]
def get_license_manager(service_instance):
    '''
    Return the license manager of a vCenter/ESXi host.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        return service_instance.content.licenseManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_license_assignment_manager(service_instance):
    '''
    Return the license assignment manager of a vCenter.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises ``VMwareObjectRetrievalError`` when none is available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    if not assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Return the licenses installed on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        licenses = license_manager.licenses
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return licenses
def add_license(service_instance, key, description, license_manager=None):
    '''
    Add a license and return the added license object.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as the label shown by the VI client.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        added_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return added_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ``ArgumentValueError`` when entity_name is missing and
    ``VMwareObjectRetrievalError`` when the assignment lookup is
    inconsistent (see below).
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # vCenter case: the entity id is the instance UUID and the display
        # name of the assignment is later compared against entity_name.
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        # Concrete entity (e.g. host or cluster): use its managed object id.
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # A vCenter UUID lookup must return exactly one assignment.
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license
    (vim.LicenseManagerLicenseInfo).

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # No entity given: assign to the vCenter itself, identified by its
        # instance UUID
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before re-raising, consistent with the other API wrappers
            # in this module
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Retrieves the datacenters known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    datacenters = list_objects(service_instance, vim.Datacenter)
    return datacenters
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns vim.Datacenter objects from a vCenter, optionally filtered
    by name.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters regardless of
        the name filter. Default value is False.
    '''
    found = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            found.append(entry['object'])
    return found
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns the vim.Datacenter managed object with the given name.
    Raises VMwareObjectRetrievalError when no such datacenter exists.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the inventory root folder and returns the
    new vim.Datacenter managed object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        # Datacenters can only be created directly under the root folder
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster (vim.ClusterComputeResource) in a datacenter.
    Raises VMwareObjectRetrievalError when the cluster is not found.

    dc_ref
        The datacenter reference

    cluster
        The name of the cluster to be retrieved
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Traverse Datacenter -> hostFolder -> childEntity; the datacenter node
    # itself is skipped so only objects under hostFolder are collected
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx) describing the initial
        cluster configuration.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        # Clusters live under the datacenter's hostFolder
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster's configuration and waits for the reconfiguration
    task to complete.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx) with the changes to
        apply.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True applies the spec incrementally instead of replacing
        # the entire cluster configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the server-side reconfiguration task finishes
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Retrieves the clusters known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    clusters = list_objects(service_instance, vim.ClusterComputeResource)
    return clusters
def list_datastore_clusters(service_instance):
    '''
    Retrieves the datastore clusters (storage pods) known to the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    datastore_clusters = list_objects(service_instance, vim.StoragePod)
    return datastore_clusters
def list_datastores(service_instance):
    '''
    Retrieves the datastores known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastores = list_objects(service_instance, vim.Datastore)
    return datastores
def list_datastores_full(service_instance):
    '''
    Returns a dict mapping each datastore name to its basic information:
    name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {ds: list_datastore_full(service_instance, ds)
            for ds in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts.
    Sizes are reported in MiB; usage is a percentage.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # summary sizes are in bytes; convert to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against zero capacity (e.g. an inaccessible datastore) to
    # avoid a ZeroDivisionError
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key renders as "'vim.HostSystem:host-123'"; extract the moid
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Returns the managed object reference of the given type with the given
    name, or None if no match exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Scan the container view, stopping at the first name match
    return next((obj for obj in container.view if obj.name == obj_name),
                None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Returns the managed object reference of the given type with the given
    moid, or None if no match exists.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Scan the container view, stopping at the first moid match
    return next((obj for obj in container.view if obj._moId == obj_moid),
                None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            # wait_for_task is defined in this module; call it directly
            # instead of through the fully qualified salt.utils.vmware
            # namespace, for consistency with the rest of the file
            files.append(wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Datastores without matching files are simply skipped
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses.

    The caller's ``datastore_names`` list is never modified.

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # Build a new list instead of extending in place, so the
            # caller's datastore_names argument is not mutated
            datastore_names = list(datastore_names) + disk_datastores
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))
    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object from which to obtain the storage system.

    host_ref
        The vim.HostSystem whose storage system is retrieved.

    hostname
        Name of the host, used in logging and error messages. Retrieved
        from host_ref when not provided. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse HostSystem -> configManager.storageSystem to reach the
    # storage system managed object
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo.

    storage_system
        The vim.HostStorageSystem used to perform the query.

    device_path
        Path of the device whose partition info is retrieved.
    '''
    try:
        # RetrieveDiskPartitionInfo takes a list of paths and returns a
        # parallel list of results; we only query a single device here
        partition_infos = \
                storage_system.RetrieveDiskPartitionInfo(
                    devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec).

    storage_system
        The vim.HostStorageSystem used to compute the partition info.

    device_path
        Path of the device on which the partition is added.

    partition_info
        The device's current vim.HostDiskPartitionInfo.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use %s lazy formatting; a '{0}' placeholder is never expanded by the
    # logging module
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id and returns the new
    vim.Datastore reference.

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved from host_ref when not
        provided. Default is None.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # The new partition consumes the remaining free space on the disk
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse HostSystem -> configManager.datastoreSystem
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore. The removal is performed through the datastore
    system of the first host the datastore is attached to.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        # RemoveDatastore must be issued through an attached host
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')
    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            # Keep only hosts whose direct parent is the requested cluster
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue
        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue
        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.

        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns the list of all vim.ScsiLun objects on a host (empty list when
    the host reports none).

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref,
                                                                name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # scsi address -> lun key
    lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        service_instance, host_ref, storage_system, hostname)
    # lun key -> vim.ScsiLun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Join the two maps: scsi address -> vim.ScsiLun object
    scsi_addr_to_lun = {}
    for scsi_addr, lun_key in six.iteritems(lun_key_by_scsi_addr):
        scsi_addr_to_lun[scsi_addr] = lun_by_key[lun_key]
    return scsi_addr_to_lun
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            # Nothing to filter by: no result
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk.

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None, in which case it is
        looked up from the host.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the SCSI disk with the requested canonical name among the LUNs
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # The partition layout is looked up by device path, not canonical name
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None, in which case it is retrieved
        from ``host_ref``.

    storage_system
        The ESXi host's storage system. Default is None, in which case it is
        looked up from the host.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the SCSI disk with the requested canonical name
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disk
    groups in a ESXi host, filtered by the canonical names of their cache
    disks.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of canonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # No cache disk filter and not retrieving everything: nothing to match
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by its single cache disk (the ssd member)
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that a disk group is composed of the expected cache disk and
    capacity disks.

    Raises an ``ArgumentValueError`` on the first mismatch; returns ``True``
    when the disk group matches the expectation.
    '''
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    # Compare capacity disks order-independently
    actual_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_ids = sorted(capacity_disk_ids)
    if actual_ids != expected_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_ids, expected_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the host cache configuration (the first entry of the cache
    manager's ``cacheConfigurationInfo`` list) if a host cache is configured
    on the specified host, otherwise returns None.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Traverse from the host to its cache configuration manager
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first cache configuration entry is returned (single
        # datastore-backed host cache supported)
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache
        will be configured on.

    swap_size_MiB
        The size in mebibytes (MiB) of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method

    Returns True after the configuration task completes successfully.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Lists the ESXi hosts visible through the given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    host_systems = list_objects(service_instance, vim.HostSystem)
    return host_systems
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; when True every resource pool in the container is returned
        regardless of ``resource_pool_names``

    return
        List of vim.ResourcePool managed object references

    Raises a ``VMwareObjectRetrievalError`` when no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search under the datacenter when given, otherwise under the root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the names that were searched for; the previous message
        # formatted the (always empty) result list instead
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Lists the resource pools visible through the given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    pools = list_objects(service_instance, vim.ResourcePool)
    return pools
def list_networks(service_instance):
    '''
    Lists the networks visible through the given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    networks = list_objects(service_instance, vim.Network)
    return networks
def list_vms(service_instance):
    '''
    Lists the virtual machines visible through the given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vms = list_objects(service_instance, vim.VirtualMachine)
    return vms
def list_folders(service_instance):
    '''
    Lists the folders visible through the given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    folders = list_objects(service_instance, vim.Folder)
    return folders
def list_dvs(service_instance):
    '''
    Lists the distributed virtual switches visible through the given service
    instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        switches.
    '''
    switches = list_objects(service_instance, vim.DistributedVirtualSwitch)
    return switches
def list_vapps(service_instance):
    '''
    Lists the vApps visible through the given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vapps = list_objects(service_instance, vim.VirtualApp)
    return vapps
def list_portgroups(service_instance):
    '''
    Lists the distributed virtual portgroups visible through the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual
        portgroups.
    '''
    portgroups = list_objects(service_instance,
                              vim.dvs.DistributedVirtualPortgroup)
    return portgroups
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    Polls the task roughly once per second, logging a progress message every
    ``sleep_seconds`` seconds. On success the task's result is returned; on
    failure the task's fault is translated into the matching salt exception.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Log a progress message every sleep_seconds iterations
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary relative to start_time,
        # so time_counter tracks elapsed wall-clock seconds
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the fault and translate it
        # into the matching salt exception type
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises ``VMwareObjectRetrievalError`` when no VM matches and
    ``VMwareMultipleObjectsError`` when more than one matches.
    '''
    if datacenter and not parent_ref:
        # Call the module-local helper directly, consistent with the rest of
        # this module (previously referenced through salt.utils.vmware)
        parent_ref = get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set covering hardware, storage and guest info
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = get_mors_with_properties(service_instance,
                                       vim.VirtualMachine,
                                       vm_properties,
                                       container_ref=parent_ref,
                                       traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Fixed missing separator: the two literals previously concatenated
        # into 'with thesame name'
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises ``VMwareObjectRetrievalError`` when no folder can be determined.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # For clones, place the new VM in the same folder as the source VM
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = get_datacenter(service_instance, datacenter)
        dc_props = get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Previously this path fell through and raised a NameError on the
        # undefined folder_object; fail with a meaningful error instead
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The placement folder could not be determined')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object used to query the vCenter

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies

    Raises ``VMwareObjectRetrievalError`` / ``VMwareMultipleObjectsError``
    when the requested placement cannot be resolved unambiguously.
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The host does not expose resourcePool directly; traverse
            # through its parent cluster to find the pool
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: this message previously formatted placement['host'],
            # which raised a KeyError in this branch
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit.

    unit
        Unit of the size: 'GB', 'MB' or 'KB' (case insensitive);
        Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    return
        Dictionary with the converted integer size under ``'size'`` and the
        literal ``'KB'`` under ``'unit'`` (the previous docstring incorrectly
        claimed a bare long integer was returned)

    Raises ``ArgumentValueError`` when the unit is not recognized.
    '''
    unit_lower = unit.lower()
    if unit_lower == 'gb':
        # vCenter needs long value
        target_size = int(size * 1024 * 1024)
    elif unit_lower == 'mb':
        target_size = int(size * 1024)
    elif unit_lower == 'kb':
        target_size = int(size)
    else:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': target_size, 'unit': 'KB'}
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    try:
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Creation can take a while: poll every 10 seconds and log at info level
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object.
        Default is None.
    '''
    try:
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # A missing vmx file surfaces as FileNotFound; translate to a
        # registration-specific error for callers
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update

    Returns the result of the completed reconfiguration task.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # NOTE: vm_ref is rebound to the task result, which is then returned
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine (invokes the VM's Destroy task and waits
    for it to complete).

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory (the previous
    docstring said "Destroys", copied from delete_vm; UnregisterVM only
    removes the VM from the inventory).

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Message fixed from the misleading 'Destroying vm'
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, consistent with the other API wrappers
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
create_vm
|
python
|
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
'''
Creates virtual machine from config spec
vm_name
Virtual machine name to be created
vm_config_spec
Virtual Machine Config Spec object
folder_object
vm Folder managed object reference
resourcepool_object
Resource pool object where the machine will be created
host_object
Host object where the machine will ne placed (optional)
return
Virtual Machine managed object reference
'''
try:
if host_object and isinstance(host_object, vim.HostSystem):
task = folder_object.CreateVM_Task(vm_config_spec,
pool=resourcepool_object,
host=host_object)
else:
task = folder_object.CreateVM_Task(vm_config_spec,
pool=resourcepool_object)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
return vm_object
|
Creates virtual machine from config spec
vm_name
Virtual machine name to be created
vm_config_spec
Virtual Machine Config Spec object
folder_object
vm Folder managed object reference
resourcepool_object
Resource pool object where the machine will be created
host_object
        Host object where the machine will be placed (optional)
return
Virtual Machine managed object reference
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L3526-L3568
|
[
"def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):\n '''\n Waits for a task to be completed.\n\n task\n The task to wait for.\n\n instance_name\n The name of the ESXi host, vCenter Server, or Virtual Machine that\n the task is being run on.\n\n task_type\n The type of task being performed. Useful information for debugging purposes.\n\n sleep_seconds\n The number of seconds to wait before querying the task again.\n Defaults to ``1`` second.\n\n log_level\n The level at which to log task information. Default is ``debug``,\n but ``info`` is also supported.\n '''\n time_counter = 0\n start_time = time.time()\n log.trace('task = %s, task_type = %s', task, task.__class__.__name__)\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n while task_info.state == 'running' or task_info.state == 'queued':\n if time_counter % sleep_seconds == 0:\n msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n time.sleep(1.0 - ((time.time() - start_time) % 1.0))\n time_counter += 1\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. 
Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n if task_info.state == 'success':\n msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n # task is in a successful state\n return task_info.result\n else:\n # task is in an error state\n try:\n raise task_info.error\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.fault.SystemError as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareSystemError(exc.msg)\n except vmodl.fault.InvalidArgument as exc:\n log.exception(exc)\n exc_message = exc.msg\n if exc.faultMessage:\n exc_message = '{0} ({1})'.format(exc_message,\n exc.faultMessage[0].message)\n raise salt.exceptions.VMwareApiError(exc_message)\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Declare this utility module's availability: it is usable only when
    pyVmomi could be imported.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out to the ``esxcli`` binary, run the specified command and return
    the raw result dictionary produced by ``cmd.run_all``.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False

    # Fall back to the standard https/443 endpoint when not specified.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'

    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)

    if esxi_host:
        # 'host' is a vCenter; target the ESXi machine through it with -h.
        target = ' -s {0} -h {1}'.format(host, esxi_host)
    else:
        # Talking straight to an ESXi server.
        target = ' -s {0}'.format(host)
    esx_cmd += target + ' -u {0} -p \'{1}\' ' \
               '--protocol={2} --portnumber={3} {4}'.format(user,
                                                            pwd,
                                                            protocol,
                                                            port,
                                                            cmd)

    return salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username / password
        Credentials; both are mandatory when ``mechanism`` is ``userpass``.

    protocol / port
        Endpoint parameters passed straight to ``SmartConnect``.

    mechanism
        Either ``userpass`` or ``sspi``.

    principal / domain
        Kerberos service principal and user domain; both are mandatory when
        ``mechanism`` is ``sspi``.

    :raises salt.exceptions.CommandExecutionError: on missing mandatory
        parameters or an unsupported mechanism.
    :raises salt.exceptions.VMwareConnectionError: when the connection cannot
        be established (including after the SSL-verification fallbacks).
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # BUGFIX: 'message' only exists on Python 2 exceptions; fall back to
        # str() so this handler also works on Python 3.
        message = getattr(exc, 'message', six.text_type(exc))
        if 'unexpected keyword argument' in message:
            log.error('Initial connect to the VMware endpoint failed with %s', message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
        # BUGFIX: always re-raise. Previously a non-matching TypeError fell
        # through, leaving 'service_instance' unbound and masking the real
        # error with an UnboundLocalError at atexit.register below.
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # First fallback: retry with certificate verification disabled.
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: force TLSv1 with verification disabled.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is torn down when the interpreter exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Look up a VMware customization spec by name, for use when customizing
    a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Return the first managed object of type ``obj_type`` whose name matches
    ``obj_name``, or None when nothing matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    matches = (item for item in container.view if item.name == obj_name)
    return next(matches, None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    Reuses pyVim's process-wide cached connection when it targets the same
    host:port; otherwise (or when running as a proxy minion) the cached
    session is dropped and a new one is established. The returned session is
    verified to still be authenticated before being handed back.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``

    :raises salt.exceptions.VMwareApiError: on permission or API faults while
        validating the session.
    :raises salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    # Check pyVim's cached service instance first.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and log in again from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    The new stub reuses the authenticated session of the existing
    connection: the session cookie is propagated through pyVmomi's request
    context and copied onto the new stub, so no new login is required.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    stub = service_instance._stub
    # stub.host is 'hostname:port'; only the hostname is needed here.
    hostname = stub.host.split(':')[0]
    # The session cookie value is the quoted token inside the Set-Cookie
    # style string stored on the stub.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object, used only in the trace log message.
        This field is optional.
    '''
    if not name:
        # NOTE(review): this branch is dead for the default value -
        # '<unnamed>' is truthy - so it only fires when a caller explicitly
        # passes a falsy name; confirm whether the default was meant to be
        # None.
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a fresh ServiceInstance that shares the SOAP stub (and therefore
    # the authenticated session) of the given managed object.
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Close the session with the vCenter server or ESXi host, translating
    pyVmomi faults into salt exceptions.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Return True when the connection targets a vCenter Server and False when
    it targets an ESXi host; raise for any other endpoint type.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    # 'VirtualCenter' means vCenter, 'HostAgent' means a direct ESXi host.
    known_types = {'VirtualCenter': True, 'HostAgent': False}
    if api_type in known_types:
        return known_types[api_type]
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Return the AboutInfo of the vCenter or ESXi host, translating pyVmomi
    faults into salt exceptions.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object, or None when
    no switch with that name exists.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object
    '''
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    candidates = (item for item in container.view if item.name == dvs_name)
    return next(candidates, None)
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    :return: the base64-encoded bytes of the first non-empty token produced
        by the GSSAPI security context.
    :raises ImportError: when the optional ``gssapi`` library is missing.
    :raises salt.exceptions.CommandExecutionError: when no token could be
        obtained from the context negotiation.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # NOTE(review): the very first token produced is returned, even if
        # the context is not yet fully established - presumably a single
        # negotiation round-trip is expected here; confirm against the
        # gssapi workflow.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # in_token is never reassigned inside this loop, so this branch
        # fires on the first empty token returned by step().
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    :return: dict mapping grain names to values; empty when the connection
        is not a direct ESXi (HostAgent) connection.

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        # Direct host connection: read everything from the first HostSystem
        # entry in the container view.
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize divided down twice by 1024 (bytes -> MiB, assuming
            # the API reports bytes - TODO confirm against the vSphere API).
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Addressing per VMkernel NIC.
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host + '.' + domain, with the dot omitted when there
            # is no domain.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # MAC addresses of the physical NICs.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the reference to the container view.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Retrieve and return the inventory (service content) of a Service
    Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Return the inventory root folder of a vCenter, translating pyVmomi
    faults into salt exceptions.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.

    :return: list of ObjectContent results from the PropertyCollector.
    :raises salt.exceptions.VMwareApiError: on permission or API faults.
    :raises salt.exceptions.VMwareRuntimeError: on vmodl runtime faults.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose ``property_name`` equals
    ``property_value`` (or whose stringified reference id matches it), or
    None when nothing matches.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The name of the property for which to obtain the managed object reference.

    property_name
        An object property used to return the specified object reference results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.
    '''
    # Get list of all managed object references with specified property
    entries = get_mors_with_properties(service_instance,
                                       object_type,
                                       property_list=[property_name],
                                       container_ref=container_ref)
    for entry in entries:
        # The stringified moref id (quotes stripped) also counts as a match.
        ref_id = six.text_type(entry.get('object', '')).strip('\'"')
        if entry[property_name] == property_value or property_value == ref_id:
            return entry['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Return a list of dicts, one per retrieved managed object: the requested
    properties keyed by name, plus the reference itself under ``object``.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    def _retrieve():
        # Single place for the actual retrieval so transient connection
        # errors below can simply retry it once.
        return get_content(service_instance, object_type,
                           property_list=property_list,
                           container_ref=container_ref,
                           traversal_spec=traversal_spec,
                           local_properties=local_properties)

    try:
        content = _retrieve()
    except BadStatusLine:
        content = _retrieve()
    except IOError as err:
        if err.errno != errno.EPIPE:
            raise err
        content = _retrieve()

    object_list = []
    for obj in content:
        entry = {prop.name: prop.val for prop in obj.propSet}
        entry['object'] = obj.obj
        object_list.append(entry)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Retrieve and return the requested properties of a managed object.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    :raises salt.exceptions.VMwareApiError: when no properties could be
        retrieved for the object.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First fetch the object's name, used only for log/error messages.
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    prop_entries = get_mors_with_properties(service_instance,
                                            type(mo_ref),
                                            container_ref=mo_ref,
                                            property_list=properties,
                                            local_properties=True)
    if not prop_entries:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return prop_entries[0]
def get_managed_object_name(mo_ref):
    '''
    Return the ``name`` property of a managed object, or None when the
    object has no name.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Instantiate and return the virtual network adapter device matching the
    given type name.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e'.
    '''
    factories = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type in factories:
        return factories[adapter_type]()
    raise ValueError('An unknown network adapter object type name.')
def get_network_adapter_object_type(adapter_object):
    '''
    Return the type name of a virtual network adapter device.

    adapter_object
        The adapter object from which to obtain the network adapter type.
    '''
    # Order matters: the more specific classes (Vmxnet2, E1000e) are checked
    # before the ones they derive from, exactly as in the original chain.
    known_types = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for adapter_cls, label in known_types:
        if isinstance(adapter_object, adapter_cls):
            return label
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Return the distributed virtual switches (DVSs) of a datacenter, either
    all of them or only the ones whose names are listed.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # DVSs live under the datacenter's networkFolder; traverse one level of
    # child entities inside that folder.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    selected = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            selected.append(entry['object'])
    return selected
def get_network_folder(dc_ref):
    '''
    Return the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    :raises salt.exceptions.VMwareObjectRetrievalError: when the folder
        could not be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits for
    the creation task to complete.

    Note: despite what earlier documentation claimed, the reference to the
    newly created switch is NOT returned (the function returns None); use
    ``get_dvss`` to look it up afterwards.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the
        DVS name is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the create task as done.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and waits
    for the reconfiguration task to complete. Returns ``None``.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.

    Raises VMwareApiError on permission or API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether network I/O control (NIOC) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.

    Raises VMwareApiError on permission or API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        # Takes effect immediately; there is no task to wait for
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).

    parent_ref
        The parent object reference. Can be either a datacenter or a
        distributed virtual switch.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises ArgumentValueError for an unsupported parent type.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Datacenter: hop through the network folder to its children
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            type=vim.Datacenter,
            path='networkFolder',
            skip=True,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                type=vim.Folder,
                path='childEntity',
                skip=False)])
    else:
        # Distributed virtual switch: portgroups hang off 'portgroup'
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            type=vim.DistributedVirtualSwitch,
            path='portgroup',
            skip=False)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    portgroups = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_portgroups or \
                (portgroup_names and entry['name'] in portgroup_names):
            portgroups.append(entry['object'])
    return portgroups
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference

    Raises VMwareObjectRetrievalError if no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.DistributedVirtualSwitch,
        path='portgroup',
        skip=False)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        # Uplink portgroups carry the SYSTEM/DVS.UPLINKPG tag
        if entry['tag'] and any(t.key == 'SYSTEM/DVS.UPLINKPG'
                                for t in entry['tag']):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual
    switch (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)

    Raises VMwareApiError on permission or API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference (vim.DistributedVirtualPortgroup).

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec) to apply.

    Raises VMwareApiError on permission or API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log-message typo: 'portgrouo' -> 'portgroup'
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy
    task to complete.

    portgroup_ref
        The portgroup reference (vim.DistributedVirtualPortgroup).

    Raises VMwareApiError on permission or API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.

    Raises ArgumentValueError if the parent is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Hop through the datacenter's network folder to its children
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.Datacenter,
        path='networkFolder',
        skip=True,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            type=vim.Folder,
            path='childEntity',
            skip=False)])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or \
                (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The vim type of the objects to list (e.g. vim.Datacenter).

    properties
        An optional list of object properties to retrieve.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    return [entry['name'] for entry in
            get_mors_with_properties(service_instance, vim_object,
                                     properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager of a service instance.

    service_instance
        The Service Instance Object from which to obtain the license
        manager.

    Raises VMwareApiError on permission or API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager of a service instance.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises VMwareApiError on permission or API faults,
    VMwareRuntimeError on vmodl runtime faults, and
    VMwareObjectRetrievalError if the manager is not available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not
        provided it will be retrieved.

    Raises VMwareApiError on permission or API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license to the instance and returns the added license
    (vim.LicenseManagerLicenseInfo).

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add; stored as the
        'VpxClientLicenseLabel' label.

    license_manager
        The License Manager object of the service instance. If not
        provided it will be retrieved.

    Raises VMwareApiError on permission or API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is attached as a label the vSphere client displays
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging; mandatory. When entity_ref is None
        it is also checked against the assignment's display name.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ArgumentValueError if entity_name is missing, VMwareApiError /
    VMwareRuntimeError on API faults, and VMwareObjectRetrievalError on
    inconsistent vCenter assignment results.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            # The vCenter itself is addressed by its instance UUID
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # A vCenter (uuid) query should return at most one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        # NOTE(review): assignments[0] raises IndexError if the query
        # returned no assignments — confirm that case cannot occur
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')

    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises VMwareApiError on permission or API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Log before re-raising, for consistency with the rest of the
            # module's fault handling (previously not logged here)
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance (names only, not managed object references).

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns datacenter managed objects in a vCenter.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or \
                (datacenter_names and entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name

    Raises VMwareObjectRetrievalError if no such datacenter exists.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter under the root folder and returns the new
    vim.Datacenter object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name

    Raises VMwareApiError on permission or API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster (vim.ClusterComputeResource) in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The name of the cluster to be retrieved

    Raises VMwareObjectRetrievalError if the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Hop through the datacenter's host folder to its children
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        type=vim.Datacenter,
        path='hostFolder',
        skip=True,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            type=vim.Folder,
            path='childEntity',
            skip=False)])
    matches = []
    for entry in get_mors_with_properties(si,
                                          vim.ClusterComputeResource,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if entry['name'] == cluster:
            matches.append(entry['object'])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter. Returns ``None``.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).

    Raises VMwareApiError on permission or API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        # Clusters are created inside the datacenter's host folder
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster and waits for the reconfiguration task to complete.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx); applied incrementally
        (modify=True).

    Raises VMwareApiError on permission or API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names associated with a given service
    instance (names only, not managed object references).

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster (storage pod) names associated
    with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastore names associated with a given service
    instance (names only, not managed object references).

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dict mapping each datastore name to its basic information:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    Capacity, free and used are reported in MiB; usage is a percentage.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )

    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against a zero-capacity (e.g. inaccessible) datastore to
    # avoid a ZeroDivisionError when computing the usage percentage.
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []

    for host in datastore_object.host:
        # host.key renders as '<type>:<moid>'; extract the moid part
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)

    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name, or
    None when no object matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # First object whose name matches, else None
    return next((obj for obj in container.view if obj.name == obj_name),
                None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id, or None
    when no object matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # First object whose managed object id matches, else None
    return next((obj for obj in container.view if obj._moId == obj_moid),
                None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Names of the datastores to search

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
        (datastores where the path was not found are silently skipped)
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        # Translate pyVmomi faults into salt exceptions
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # Missing directory on this datastore is not fatal; continue
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible. Supported
        types: vim.HostSystem, vim.ClusterComputeResource, vim.Datacenter,
        vim.StoragePod, or the root 'Datacenters' vim.Folder.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.

    Raises ArgumentValueError for unsupported reference types.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        # Map the backing disk ids to the names of the datastores whose
        # VMFS volumes have an extent on one of those disks
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:

            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores

    if (not get_all_datastores) and (not datastore_names):
        # Nothing left to match after disk filtering; short-circuit
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []

    log.trace('datastore_names = %s', datastore_names)

    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore. Returns ``None``.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises VMwareApiError on permission or API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object.

    host_ref
        The vim.HostSystem reference whose storage system is retrieved.

    hostname
        Host name used in logging/error messages; looked up from host_ref
        when not provided.

    Raises VMwareObjectRetrievalError if the storage system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse to the host's configManager.storageSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo.

    storage_system
        The vim.HostStorageSystem used to query the partition info.

    device_path
        Path of the disk device to inspect.

    Raises VMwareApiError on permission or API faults and
    VMwareRuntimeError on vmodl runtime faults.
    '''
    try:
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    # Translate pyVmomi faults into salt exceptions
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Only one device path was queried, so only one info is expected
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)
    storage_system
        The host's vim.HostStorageSystem object.
    device_path
        Path of the device on which the new partition is added.
    partition_info
        The device's current vim.HostDiskPartitionInfo.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]
    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fixed: use %-style lazy formatting; a '{0}' placeholder is never
    # substituted by the logging module's %-based formatting
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore on a SCSI disk and returns the new
    vim.Datastore reference.
    host_ref
        vim.HostSystem object referencing a host to create the datastore on
    datastore_name
        Name of the datastore
    disk_ref
        vim.HostScsiDislk on which the datastore is created
    vmfs_major_version
        VMFS major version to use
    storage_system
        The host's vim.HostStorageSystem; retrieved from the host when not
        provided.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
    target_disk = disk_ref
    # Compute the spec of a new vmfs partition spanning the free space at
    # the end of the disk
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Retrieves the HostDatastoreSystem managed object of an ESXi host.
    host_ref
        Reference to the ESXi host
    hostname
        Name of the host. This argument is optional.
    '''
    if hostname is None or not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem property
    spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(si,
                                       vim.HostDatastoreSystem,
                                       property_list=['datastore'],
                                       container_ref=host_ref,
                                       traversal_spec=spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return results[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore by detaching it through the datastore system of one
    of its attached hosts.
    service_instance
        The Service Instance Object containing the datastore
    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    # The removal must be issued through a host that mounts the datastore
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.
    service_instance
        The Service Instance Object from which to obtain the hosts.
    datacenter_name
        The datacenter name. Default is None.
    host_names
        The host_names to be retrieved. Default is None.
    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.
    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    wanted_names = host_names if host_names else []
    properties = ['name']
    if datacenter_name:
        container = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Cluster membership is checked via the host's parent object
            properties.append('parent')
    else:
        # Assume the root folder is the starting point
        container = get_root_folder(service_instance)
    candidates = get_mors_with_properties(service_instance,
                                          vim.HostSystem,
                                          container_ref=container,
                                          property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in candidates])
    selected = []
    for host in candidates:
        # Hosts qualify by cluster membership (when requested) and then by
        # name, unless all hosts are wanted
        if cluster_name:
            parent = host['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host['name'] in wanted_names:
            selected.append(host['object'])
    return selected
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
    map[<scsi_address>] = <lun key>
    service_instance
        The Service Instance Object from which to obtain the hosts
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    storage_system
        The host's storage system. Default is None.
    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    # Each lun may be reachable over several paths; every path's scsi
    # address ends up pointing at the same lun key
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns the list of all vim.ScsiLun objects on an ESXi host (an empty
    list when the host exposes none).
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    storage_system
        The host's storage system. Default is None.
    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    storage_system
        The host's storage system. Default is None.
    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # scsi address -> lun key
    lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        si, host_ref, storage_system, hostname)
    # lun key -> lun object
    lun_by_key = {lun.key: lun for lun in
                  get_all_luns(host_ref, storage_system, hostname)}
    # Compose the two maps: scsi address -> lun object
    return {scsi_addr: lun_by_key[lun_key]
            for scsi_addr, lun_key in six.iteritems(lun_key_by_scsi_addr)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None
    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None
    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not disk_ids and not scsi_addresses:
            # Nothing to filter by, nothing to return
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [lun_key for scsi_addr, lun_key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    matched_disks = []
    for lun in get_all_luns(host_ref, storage_system):
        if not isinstance(lun, vim.HostScsiDisk):
            continue
        # Keep the disk when everything is wanted, or when it matches
        # either a requested canonical name or a key derived from a
        # requested scsi address
        if (get_all_disks or
                (disk_ids and lun.canonicalName in disk_ids) or
                lun.key in disk_keys):
            matched_disks.append(lun)
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in matched_disks])
    return matched_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk
    host_ref
        The reference of the ESXi host containing the disk
    disk_id
        The canonical name of the disk whose partitions are to be removed
    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    luns = props.get('storageDeviceInfo.scsiLun')
    if not luns:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(luns),
        ', '.join([l.canonicalName for l in luns])
    )
    # Locate the scsi disk with the requested canonical name
    matching_disks = [l for l in luns
                      if isinstance(l, vim.HostScsiDisk) and
                      l.canonicalName == disk_id]
    if not matching_disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, matching_disks[0].devicePath)
    partition_info = _get_partition_info(storage_system,
                                         matching_disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk of an ESXi host.
    service_instance
        The Service Instance Object from which to obtain all information
    host_ref
        The reference of the ESXi host containing the disk
    disk_id
        The canonical name of the disk whose partitions are to be removed
    hostname
        The ESXi hostname. Default is None.
    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # Retrieve the host's scsi luns to find the device path of the disk
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.
    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            # No filter given and not all groups requested: nothing matches
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # A disk group is identified by the canonical name of its (single)
    # cache disk (the ssd attribute of the mapping)
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that a disk group contains exactly the expected cache disk and
    capacity disks; raises ArgumentValueError when the check fails, returns
    True otherwise.
    '''
    if disk_group.ssd.canonicalName != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(disk_group.ssd.canonicalName, cache_disk_id))
    # Compare the capacity (non-ssd) disk ids irrespective of order
    found_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_ids = sorted(capacity_disk_ids)
    if found_ids != expected_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(found_ids, expected_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, other wise returns None
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Traverse from the host to its cacheConfigurationManager
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        # Only the first cache configuration is returned (single-datastore
        # host caches; see TODO above the function)
        return results[0]['cacheConfigurationInfo'][0]
    else:
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host; returns True on
    success.
    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.
    datastore_ref
        The vim.Datastore opject representing the datastore the host cache will
        be configured on.
    swap_size_MiB
        The size in Mibibytes of the swap.
    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the configuration task finishes
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts (vim.HostSystem) associated with a given service
    instance.
    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects
    service_instance
        The service instance object to query the vCenter
    resource_pool_names
        Resource pool names
    datacenter_name
        Name of the datacenter where the resource pool is available
    get_all_resource_pools
        Boolean
    return
        Resourcepool managed object reference
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Restrict the search to the datacenter when one is given
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)
    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)
    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Fixed: report the requested names; the previous message formatted
        # ``selected_pools``, which is always empty at this point
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools (vim.ResourcePool) associated with a
    given service instance.
    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Returns a list of networks (vim.Network) associated with a given service
    instance.
    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Returns a list of VMs (vim.VirtualMachine) associated with a given
    service instance.
    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Returns a list of folders (vim.Folder) associated with a given service
    instance.
    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches
    (vim.DistributedVirtualSwitch) associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Returns a list of vApps (vim.VirtualApp) associated with a given service
    instance.
    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.
    service_instance
        The Service Instance Object from which to obtain distributed virtual
        portgroups.
    '''
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def _query_task_info(task):
    '''
    Returns ``task.info``, translating pyVmomi faults raised while reading
    it into the corresponding salt VMware exceptions.
    '''
    try:
        return task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)


def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.
    task
        The task to wait for.
    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.
    task_type
        The type of task being performed. Useful information for debugging purposes.
    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.
    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Fault-to-exception translation of the repeated task.info reads is
    # factored into _query_task_info
    task_info = _query_task_info(task)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Log a progress message once every sleep_seconds seconds
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary so the counter stays
        # aligned with wall-clock seconds
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        task_info = _query_task_info(task)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise its fault, translated into the
        # corresponding salt exception
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.
    service_instance
        Service instance object to access vCenter
    name
        Name of the virtual machine.
    datacenter
        Datacenter name
    vm_properties
        List of vm properties.
    traversal_spec
        Traversal Spec object(s) for searching.
    parent_ref
        Container Reference object for searching under a given object.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default set of properties commonly needed by the salt VM modules
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # Fixed: the previous message was built from two implicitly
        # concatenated string literals, producing "with thesame name"
        raise salt.exceptions.VMwareMultipleObjectsError(
            'Multiple virtual machines were found with the same name, '
            'please specify a container.')
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)
    '''
    log.trace('Retrieving folder information')
    # Normalize so the membership test below cannot raise TypeError when the
    # caller passes placement=None.
    if placement is None:
        placement = {}
    if base_vm_name:
        # When cloning, place the new VM alongside the base VM.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        # Fixed: an empty result previously raised a bare IndexError below.
        if not folder_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'The specified folder {0} was not found'.format(placement['folder']))
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    else:
        # Fixed: previously this fell through with 'folder_object' unbound,
        # raising an obscure UnboundLocalError on the return statement.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The placement folder could not be determined: no base virtual '
            'machine, folder placement or datacenter was provided')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    # Fixed: 'placement' defaults to None, so the membership tests below used
    # to raise TypeError; normalize and fall through to the explicit error.
    if placement is None:
        placement = {}
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                    get_properties_of_managed_object(host_objects[0],
                                                     properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose the resource pool through the parent
            # compute resource instead; traverse to it explicitly.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: this message previously referenced placement['host'],
            # which raised KeyError in this branch and named the wrong object.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a long integer.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size
    '''
    # Multipliers from the supported units down to kibibytes.
    # vCenter needs a long (integer) value, hence the int() below.
    kb_multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    factor = kb_multipliers.get(unit.lower())
    if factor is None:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': int(size * factor), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers a virtual machine on or off and waits for the task to complete.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    # Resolve the requested operation up front; the two original branches
    # differed only in the method invoked and the task label.
    if action == 'on':
        power_method = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_method = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_method()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host
        Placement host of the virtual machine, vim.HostSystem object
    '''
    # Assemble the call arguments once; 'host' is only passed when supplied.
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        return wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfiguration finishes and return its result.
    return wait_for_task(reconfig_task, vm_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        destroy_task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the destroy task completes.
    wait_for_task(destroy_task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory; unlike delete_vm()
    this leaves the virtual machine's files in place on the datastore.
    (Fixed: the docstring was copy-pasted from delete_vm and claimed the VM
    was destroyed.)

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed: the trace message previously said 'Destroying vm'.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before re-wrapping, consistent with the other API wrappers in
        # this module (delete_vm, update_vm, register_vm).
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
register_vm
|
python
|
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
'''
Registers a virtual machine to the inventory with the given vmx file, on success
it returns the vim.VirtualMachine managed object reference
datacenter
Datacenter object of the virtual machine, vim.Datacenter object
name
Name of the virtual machine
vmx_path:
Full path to the vmx file, datastore name should be included
resourcepool
Placement resource pool of the virtual machine, vim.ResourcePool object
host
Placement host of the virtual machine, vim.HostSystem object
'''
try:
if host_object:
task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
asTemplate=False,
host=host_object,
pool=resourcepool_object)
else:
task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
asTemplate=False,
pool=resourcepool_object)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
try:
vm_ref = wait_for_task(task, name, 'RegisterVM Task')
except salt.exceptions.VMwareFileNotFoundError as exc:
raise salt.exceptions.VMwareVmRegisterError(
'An error occurred during registration operation, the '
'configuration file was not found: {0}'.format(exc))
return vm_ref
|
Registers a virtual machine to the inventory with the given vmx file, on success
it returns the vim.VirtualMachine managed object reference
datacenter
Datacenter object of the virtual machine, vim.Datacenter object
name
Name of the virtual machine
vmx_path:
Full path to the vmx file, datastore name should be included
resourcepool
Placement resource pool of the virtual machine, vim.ResourcePool object
host
Placement host of the virtual machine, vim.HostSystem object
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L3571-L3618
|
[
"def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):\n '''\n Waits for a task to be completed.\n\n task\n The task to wait for.\n\n instance_name\n The name of the ESXi host, vCenter Server, or Virtual Machine that\n the task is being run on.\n\n task_type\n The type of task being performed. Useful information for debugging purposes.\n\n sleep_seconds\n The number of seconds to wait before querying the task again.\n Defaults to ``1`` second.\n\n log_level\n The level at which to log task information. Default is ``debug``,\n but ``info`` is also supported.\n '''\n time_counter = 0\n start_time = time.time()\n log.trace('task = %s, task_type = %s', task, task.__class__.__name__)\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n while task_info.state == 'running' or task_info.state == 'queued':\n if time_counter % sleep_seconds == 0:\n msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n time.sleep(1.0 - ((time.time() - start_time) % 1.0))\n time_counter += 1\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. 
Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n if task_info.state == 'success':\n msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n # task is in a successful state\n return task_info.result\n else:\n # task is in an error state\n try:\n raise task_info.error\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.fault.SystemError as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareSystemError(exc.msg)\n except vmodl.fault.InvalidArgument as exc:\n log.exception(exc)\n exc_message = exc.msg\n if exc.faultMessage:\n exc_message = '{0} ({1})'.format(exc_message,\n exc.faultMessage[0].message)\n raise salt.exceptions.VMwareApiError(exc_message)\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary
    '''
    # Locate the esxcli binary; bail out early (returning False, not raising)
    # when it is not installed.
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    # NOTE(review): the password is interpolated directly into the command
    # string below, so it is visible in the process list while the command
    # runs (output_loglevel='quiet' only suppresses logging). Using the
    # credstore avoids this -- confirm whether that should be required.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username / password
        Credentials; both are mandatory for the ``userpass`` mechanism.

    protocol / port
        Connection protocol and TCP port.

    mechanism
        Authentication mechanism: ``userpass`` or ``sspi``.

    principal / domain
        Kerberos service principal and user domain; both are mandatory for
        the ``sspi`` mechanism.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Fixed: Python 3 exceptions have no '.message' attribute, so the
        # original 'exc.message' raised AttributeError on Python 3.
        exc_message = six.text_type(exc)
        if 'unexpected keyword argument' in exc_message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc_message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
        # Fixed: always re-raise. Previously a non-matching TypeError was
        # silently swallowed, leaving 'service_instance' unbound and causing
        # a confusing UnboundLocalError at the end of this function.
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # Retry with an unverified SSL context (e.g. self-signed
                # certificates on the endpoint).
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: explicitly disable certificate verification.
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Make sure the session is closed when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of customizing a clone

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    spec_manager = si.content.customizationSpecManager
    return spec_manager.GetCustomizationSpec(name=customization_spec_name)
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    # Return the first entry in the view whose name matches, or None.
    return next((entry for entry in container.view if entry.name == obj_name), None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    Re-uses pyVim's process-wide cached service instance when it points at the
    same host and the cached session is still authenticated; otherwise it
    disconnects the stale session and establishes a fresh one.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # GetSi() returns pyVim's cached (process-wide) service instance, if any.
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
                (hasattr(stub, 'host') and
                 stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Stale session: drop it and reconnect from scratch.
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    The new stub re-uses the existing connection's session cookie, so no
    re-authentication is performed.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    # NOTE(review): this disables certificate verification unconditionally on
    # modern Pythons -- confirm this is acceptable for the deployment.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    # Extract the quoted session cookie value and propagate it through the
    # vmodl request context so the new stub shares the authenticated session.
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional; note the default
        '<unnamed>' is truthy, so the fallback to ``mo_ref.name`` below only
        triggers when an explicitly falsy name is passed in.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a ServiceInstance that shares the managed object's SOAP stub
    # (and therefore its authenticated session).
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises ``VMwareApiError`` on API faults and ``VMwareRuntimeError`` on
    vmodl runtime faults, mirroring the other wrappers in this module.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Function that returns True if the connection is made to a vCenter Server and
    False if the connection is made to an ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # Map the two known API types onto the boolean result.
    known_api_types = {'VirtualCenter': True, 'HostAgent': False}
    if api_type in known_api_types:
        return known_api_types[api_type]
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Returns the ``content.about`` object of the endpoint. Raises
    ``VMwareApiError`` on API faults and ``VMwareRuntimeError`` on vmodl
    runtime faults.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object
    '''
    # Bail out early when no DVS with that name exists at all.
    if dvs_name not in list_dvs(service_instance):
        return None
    inventory = get_inventory(service_instance)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
    return next((dvs for dvs in container.view if dvs.name == dvs_name), None)
def _get_pnics(host_reference):
'''
Helper function that returns a list of PhysicalNics and their information.
'''
return host_reference.config.network.pnic
def _get_vnics(host_reference):
'''
Helper function that returns a list of VirtualNics and their information.
'''
return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
'''
Helper function that returns a list of Virtual NicManagers
and their information.
'''
return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for a Kerberos connection and return it
    base64-encoded.

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Raises ImportError if the gssapi library is unavailable and
    CommandExecutionError if no token could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    # Fixed typo in log message ('gsspi' -> 'gssapi').
    log.debug('Retrieving gssapi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # Single-step handshake: we never feed a server response back in, so
        # the first step must yield the complete token.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance
    is a HostAgent type (i.e. a direct ESXi host connection).

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Grains are only collected when connected directly to an ESXi host;
    # a vCenter connection aggregates many hosts and is skipped.
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # Only the first HostSystem in the view is inspected; a HostAgent
            # connection is expected to expose a single host.
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is reported in bytes; convert to MiB.
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            # Network grains: one entry per configured VirtualNic (vmk).
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host + '.' + domain, with the dot omitted when the
            # domain is empty.
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            # Physical NIC MAC addresses are merged into the same mapping as
            # the vmk interfaces above.
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # Drop the container view reference once harvesting is done.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (full service content) of a Service Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    content = service_instance.RetrieveContent()
    return content
def get_root_folder(service_instance):
    '''
    Return the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.
    '''
    log.trace('Retrieving root folder')
    try:
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties used to return even more
        filtered content results. If omitted, all properties are returned.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.

    Raises VMwareApiError / VMwareRuntimeError on vSphere API failures.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        # skip=True excludes the starting object itself from the results
        # unless local properties were explicitly requested.
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view
    if local_traversal_spec:
        # Only destroy views we created ourselves in this call.
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose property (or moid string)
    equals the given value, or None when nothing matches.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value of the property to match against.

    property_name
        An object property used to return the specified object reference
        results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.
    '''
    candidates = get_mors_with_properties(service_instance,
                                          object_type,
                                          property_list=[property_name],
                                          container_ref=container_ref)
    for candidate in candidates:
        # Also match on the stringified moid (quotes stripped) so callers
        # can pass either a property value or a managed object id.
        moid = six.text_type(candidate.get('object', '')).strip('\'"')
        if property_value in (candidate[property_name], moid):
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for
    the managed object. Each list entry is a dict of the requested property
    values plus an ``object`` key holding the managed object reference.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more
        filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # Retry once on a dropped HTTP connection (stale session socket).
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        # Retry once on a broken pipe; re-raise any other I/O error.
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)
    # Flatten each result's propSet into a plain dict, keeping the managed
    # object reference under the 'object' key.
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved optimally
    (local properties only, no traversal).

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises VMwareApiError if no properties could be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First fetch the object's name purely for use in log/error messages;
    # fall back to a placeholder if the object has no 'name' property.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    # Only the single queried object's property dict is returned.
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Return the name of a managed object, or None if it has no name property.

    mo_ref
        The managed object reference.
    '''
    retrieved = get_properties_of_managed_object(mo_ref, ['name'])
    return retrieved.get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device object for the given
    adapter type name.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e'.

    Raises ValueError for any other type name.
    '''
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type not in adapter_classes:
        raise ValueError('An unknown network adapter object type name.')
    return adapter_classes[adapter_type]()
def get_network_adapter_object_type(adapter_object):
    '''
    Return the network adapter type name for a virtual network adapter
    device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ValueError if the object matches no known adapter class.
    '''
    # Order matters: the more specific vmxnet2/vmxnet3 and e1000e classes
    # are checked before their vmxnet/e1000 bases.
    type_map = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for adapter_class, type_name in type_map:
        if isinstance(adapter_object, adapter_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Return the distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs.
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_spec])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvs_refs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvs_refs.append(entry['object'])
    return dvs_refs
def get_network_folder(dc_ref):
    '''
    Retrieve the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises VMwareObjectRetrievalError when the folder cannot be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits
    for the creation task to complete.

    NOTE(review): despite earlier descriptions, this function returns None;
    it does not return the created DVS reference — retrieve it separately
    (e.g. via ``get_dvss``).

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None.

    Raises VMwareApiError / VMwareRuntimeError on vSphere API failures.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    # Only when no config spec was supplied is a default one built and the
    # switch name forced; a caller-provided configSpec is used untouched.
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the vCenter-side creation task finishes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Reconfigure a distributed virtual switch with the given config spec and
    wait for the reconfiguration task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Block until vCenter reports the reconfigure task finished.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enable or disable network I/O control (NIOC) on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises ArgumentValueError if the parent is neither a datacenter nor a
    DVS.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    # The traversal depends on the parent type: datacenters reach their
    # portgroups via networkFolder -> childEntity, DVSs directly via their
    # own 'portgroup' property.
    if isinstance(parent_ref, vim.Datacenter):
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Keep only portgroups matching the requested names unless all were
    # asked for.
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference

    Raises VMwareObjectRetrievalError if no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by carrying the
    # 'SYSTEM/DVS.UPLINKPG' tag.
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    # Only the first tagged portgroup is returned.
    return items[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Create a distributed virtual portgroup on a distributed virtual switch
    (dvs) and wait for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Block until the vCenter-side creation task finishes.
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)

    Raises VMwareApiError / VMwareRuntimeError on vSphere API failures.
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed typo in log message ('portgrouo' -> 'portgroup').
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the vCenter-side reconfiguration task finishes.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Remove a distributed virtual portgroup and wait for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    # Block until the vCenter-side destroy task finishes.
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Return networks of standard switches under a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.

    Raises ArgumentValueError if the parent is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to reach networks.
    child_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[child_spec])
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or (network_names and
                                entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Return a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    item_list = get_mors_with_properties(service_instance, vim_object, properties)
    return [item['name'] for item in item_list]
def get_license_manager(service_instance):
    '''
    Return the license manager of a service instance.

    service_instance
        The Service Instance Object from which to obtain the license
        manager.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises VMwareObjectRetrievalError when the manager is missing and
    VMwareApiError / VMwareRuntimeError on vSphere API failures.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # The attribute can legitimately be unset (e.g. on a direct host
    # connection); treat that as a retrieval failure.
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Return the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license and returns the resulting license info object.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided
        it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # Attach the description as the vSphere client's label so it shows up
    # in the UI.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity_ref is not
    provided, the entity is assumed to be the vCenter itself, and
    entity_name is checked against the assignment's display name.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Required.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Raises ArgumentValueError when entity_name is missing and
    VMwareObjectRetrievalError on inconsistent assignment results.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # entity_name is guaranteed truthy here (checked above).
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # A vCenter (uuid) query must yield exactly one assignment.
    if entity_type == 'uuid' and len(assignments) > 1:
        # Fixed typo in log message ('Unexpectectedly' -> 'Unexpectedly').
        log.trace('Unexpectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        # Guard against querying the wrong vCenter instance.
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter: identified by the service instance's UUID
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Fix: log the fault before re-raising, consistent with all
            # other exception handlers in this module
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    datacenters = list_objects(service_instance, vim.Datacenter)
    return datacenters
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter, optionally filtered by name.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    # Retrieve all datacenter MORs (with their names) and keep only those
    # requested; when neither filter is set, an empty list is returned.
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns the vim.Datacenter managed object with the given name.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name

    Raises a VMwareObjectRetrievalError when no datacenter matches.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if matches:
        return matches[0]
    raise salt.exceptions.VMwareObjectRetrievalError(
        'Datacenter \'{0}\' was not found'.format(datacenter_name))
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter and returns its managed object reference.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    # Datacenters are created directly under the inventory root folder
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved

    Raises a VMwareObjectRetrievalError if the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters live under the datacenter's hostFolder; traverse one level
    # of child entities below it
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    matches = []
    for entry in get_mors_with_properties(si,
                                          vim.ClusterComputeResource,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if entry['name'] == cluster:
            matches.append(entry['object'])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''.format(cluster, dc_name))
    return matches[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        # Clusters are created under the datacenter's hostFolder; the
        # returned cluster object is intentionally discarded
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter and waits for the reconfiguration
    task to complete.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
        Defaults to None.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True applies the spec as an incremental reconfiguration
        # instead of a full replacement
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # The reconfiguration is asynchronous; block until the task finishes
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    clusters = list_objects(service_instance, vim.ClusterComputeResource)
    return clusters
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    datastore_clusters = list_objects(service_instance, vim.StoragePod)
    return datastore_clusters
def list_datastores(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastores = list_objects(service_instance, vim.Datastore)
    return datastores
def list_datastores_full(service_instance):
    '''
    Returns a mapping of datastore name to its basic information:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {datastore: list_datastore_full(service_instance, datastore)
            for datastore in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises a VMwareObjectRetrievalError when the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # The API reports sizes in bytes; convert to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Fix: guard against a zero-capacity datastore (e.g. inaccessible)
    # which previously raised ZeroDivisionError
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key stringifies as "'<type>:<moid>'"; strip quotes and keep
        # the moid part after the first colon
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns None if no matching object is found.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    # Return the first object whose name matches, or None
    return next((obj for obj in container.view if obj.name == obj_name), None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object

    Returns None if no matching object is found.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    # Return the first object whose managed object id matches, or None
    return next((obj for obj in container.view if obj._moId == obj_moid), None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            # Datastore paths use the '[<datastore name>] <path>' format
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            files.append(salt.utils.vmware.wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore that does not contain the directory is skipped
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    canonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        # The backing-disk filter only makes sense for hosts, which have
        # direct visibility into the backing storage devices
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        # Translate backing disk ids into datastore names and merge them
        # into the name filter
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:

            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        # No name filter remains; nothing can match
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)

    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError
    on runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system (vim.HostStorageSystem).

    service_instance
        The Service Instance Object used to query the property collector.

    host_ref
        The vim.HostSystem object whose storage system is retrieved.

    hostname
        Name of the host; retrieved from host_ref if not provided.

    Raises a VMwareObjectRetrievalError if the storage system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # The storage system hangs off the host's configManager
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition informations for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The host's storage system (vim.HostStorageSystem).

    device_path
        Path of the disk device to query.
    '''
    try:
        # RetrieveDiskPartitionInfo accepts a list of device paths and
        # returns one info object per path; we query a single device
        partition_infos = \
                storage_system.RetrieveDiskPartitionInfo(
                    devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The host's storage system (vim.HostStorageSystem).

    device_path
        Path of the disk device.

    partition_info
        Current partition layout (vim.HostDiskPartitionInfo) of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fix: logging uses lazy %-style formatting, so the previous '{0}'
    # placeholder was never interpolated
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's storage system (vim.HostStorageSystem).
        If not provided it will be retrieved. Default is None.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    # Compute a new partition spec that consumes the remaining free space
    # on the disk
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
                host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system (vim.HostDatastoreSystem).

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises a VMwareObjectRetrievalError if the datastore system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # The datastore system hangs off the host's configManager
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes (unmounts) a datastore via the datastore system of one of its
    attached hosts.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises a VMwareApiError when the datastore has no attached hosts or on
    permission/API faults, and a VMwareRuntimeError on runtime faults.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # Any attached host can perform the removal; use the first one
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.

    Raises an ArgumentValueError when a cluster is given without a
    datacenter.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue

        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue

        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises a VMwareObjectRetrievalError if the storage device, multipath or
    lun info cannot be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.

    Returns an empty list when the host reports no scsi luns.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))

    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref,
                                                                name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # scsi address -> lun key
    key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
        service_instance, host_ref, storage_system, hostname)
    # lun key -> vim.ScsiLun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Combine the two maps: scsi address -> vim.ScsiLun object
    scsi_addr_to_lun = {}
    for scsi_addr, lun_key in six.iteritems(key_by_scsi_addr):
        scsi_addr_to_lun[scsi_addr] = lun_by_key[lun_key]
    return scsi_addr_to_lun
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their canonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # Without any filter there is nothing to return
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    scsi_luns = get_all_luns(host_ref, storage_system)
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns the partition information of a disk on an ESXi host.

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are retrieved

    storage_system
        The ESXi host's storage system. Default is None; when None it is
        retrieved from the host.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    luns = props.get('storageDeviceInfo.scsiLun')
    if not luns:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s', hostname, len(luns),
        ', '.join([l.canonicalName for l in luns])
    )
    matching = [l for l in luns
                if isinstance(l, vim.HostScsiDisk) and
                l.canonicalName == disk_id]
    if not matching:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    disk = matching[0]
    log.trace('[%s] device_path = %s', hostname, disk.devicePath)
    partition_info = _get_partition_info(storage_system, disk.devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk attached to an ESXi host by applying an
    empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None; when None it is retrieved from
        ``host_ref``.

    storage_system
        The ESXi host's storage system. Default is None; when None it is
        retrieved from the host.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    luns = results[0].get('storageDeviceInfo.scsiLun', [])
    log.trace(
        '[%s] Retrieved %s devices: %s', hostname, len(luns),
        ', '.join([l.canonicalName for l in luns])
    )
    disks = [l for l in luns
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    device_path = disks[0].devicePath
    log.trace('[%s] device_path = %s', hostname, device_path)
    # An empty partition spec wipes every partition on the device
    try:
        storage_system.UpdateDiskPartitions(device_path,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns the vim.VsanHostDiskMapping objects (disk groups) of an ESXi
    host, optionally filtered by the canonical names of their cache disks.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disk groups.

    cache_disk_ids
        List of canonical names of the cache disks of the disk groups to be
        retrieved. The cache disk uniquely identifies its disk group
        because a disk group is guaranteed to have exactly one cache disk.
        Default is None.

    get_all_disk_groups
        When True all disk groups on the host are returned. Default value
        is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    disk_mappings = vsan_storage_info.diskMapping
    if not disk_mappings:
        return []
    matching_groups = []
    for mapping in disk_mappings:
        if get_all_disk_groups or \
                mapping.ssd.canonicalName in cache_disk_ids:
            matching_groups.append(mapping)
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [g.ssd.canonicalName for g in matching_groups]
    )
    return matching_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that a disk group contains exactly the expected cache disk and
    capacity disks.

    Raises ArgumentValueError when either the cache disk or the set of
    capacity disks differs from what was expected; returns True otherwise.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # Order is irrelevant; compare the sorted canonical names
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns the host cache configuration info of an ESXi host, or None when
    no host cache is configured.

    host_ref
        The vim.HostSystem object representing the host whose cache is
        queried.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If
        None, it will be retrieved in the method.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if host_cache_manager:
        # Manager supplied by the caller: read the property directly
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
    # No manager given: traverse from the host to its cache manager
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.cacheConfigurationManager',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostCacheConfigurationManager,
                                       ['cacheConfigurationInfo'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results or not results[0].get('cacheConfigurationInfo'):
        log.trace('Host \'%s\' has no host cache', hostname)
        return None
    return results[0]['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host on the given datastore.

    host_ref
        The vim.HostSystem object representing the host whose cache is
        configured.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method.

    return
        True on success.

    raise
        VMwareObjectRetrievalError when the host has no cache configuration
        manager; VMwareApiError/VMwareRuntimeError on API faults.
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        # Look up the cache configuration manager from the host itself
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)
    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the configuration task finished
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.

    return
        List of vim.HostSystem managed object references.
    '''
    return list_objects(service_instance, vim.HostSystem)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean; when True all resource pools in the container are returned

    return
        List of vim.ResourcePool managed object references

    raise
        VMwareObjectRetrievalError when no matching resource pool is found
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search under the datacenter when one is given, otherwise under the
    # inventory root folder
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # BUGFIX: the message used to format ``selected_pools`` which is
        # always empty here; report the names that were actually requested
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.

    return
        List of vim.ResourcePool managed object references.
    '''
    return list_objects(service_instance, vim.ResourcePool)
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.

    return
        List of vim.Network managed object references.
    '''
    return list_objects(service_instance, vim.Network)
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.

    return
        List of vim.VirtualMachine managed object references.
    '''
    return list_objects(service_instance, vim.VirtualMachine)
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.

    return
        List of vim.Folder managed object references.
    '''
    return list_objects(service_instance, vim.Folder)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.

    return
        List of vim.DistributedVirtualSwitch managed object references.
    '''
    return list_objects(service_instance, vim.DistributedVirtualSwitch)
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.

    return
        List of vim.VirtualApp managed object references.
    '''
    return list_objects(service_instance, vim.VirtualApp)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.

    return
        List of vim.dvs.DistributedVirtualPortgroup managed object references.
    '''
    return list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.

    return
        The task result on success.

    raise
        VMwareApiError, VMwareFileNotFoundError, VMwareSystemError or
        VMwareRuntimeError, translated from the underlying vim/vmodl fault.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    # Each task.info read can itself raise; translate faults into salt
    # exceptions at every read, not only the first one.
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only log every sleep_seconds ticks to keep the output readable
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep the remainder of the current wall-clock second so that the
        # polls stay aligned to 1-second boundaries from start_time
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the stored fault so the
        # handlers below can translate it into the right salt exception
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first detailed fault message when available
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    raise
        VMwareObjectRetrievalError when no machine matches;
        VMwareMultipleObjectsError when several machines share the name.
    '''
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set covering hardware, storage, guest and
        # runtime information
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # BUGFIX: the two literals used to be implicitly concatenated into a
        # single join() element, producing '...with thesame name...'; keep
        # them as separate elements so join() inserts the space.
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns the vim.Folder object a virtual machine is (to be) placed in.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary; may contain a 'folder' entry

    base_vm_name
        Existing virtual machine name (for cloning); when given, the parent
        folder of that machine is returned
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Cloning: reuse the folder of the source virtual machine
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' not in vm_props:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
        folder_object = vm_props['parent']
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' not in dc_props:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
        folder_object = dc_props['vmFolder']
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                    get_properties_of_managed_object(host_objects[0],
                                                     properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose the resource pool on the parent
            # compute resource instead of directly on the host
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # BUGFIX: this branch is only reachable when 'host' is NOT in
            # placement, so the old message's placement['host'] raised a
            # KeyError instead of the intended error; report the resource
            # pool name instead.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a dict with an
    integer size and the fixed unit 'KB'.

    unit
        Unit of the size: 'gb', 'mb' or 'kb' (case-insensitive).
        Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    raise
        ArgumentValueError when the unit is not one of the supported values
    '''
    # Multipliers from each supported unit to kibibytes
    factors = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    factor = factors.get(unit.lower())
    if factor is None:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    # vCenter needs a long (integer) value
    return {'size': int(size * factor), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers a virtual machine on or off and waits for the operation to
    complete.

    virtual_machine
        vim.VirtualMachine object of the machine to power on/off

    action
        Either 'on' or 'off'; any other value raises ArgumentValueError
    '''
    # Resolve the requested operation once so a single fault-translation
    # block covers both actions
    if action == 'on':
        power_op = virtual_machine.PowerOn
        task_name = 'power on'
    elif action == 'off':
        power_op = virtual_machine.PowerOff
        task_name = 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        task = power_op()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine),
                      task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    try:
        # Pass the host only when a concrete vim.HostSystem was supplied;
        # otherwise let the resource pool decide the placement
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # VM creation can be slow: poll every 10 s and log at info level
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update

    return
        The result of the reconfiguration task (via ``wait_for_task``)
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfiguration task completes
    vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the destroy task completes
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters the virtual machine from the inventory without deleting its
    files (the docstring used to say 'Destroys', copy-pasted from delete_vm).

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    # BUGFIX: the previous 'Destroying vm' trace was misleading —
    # unregistering leaves the VM's files on the datastore
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # Log before translating, consistent with every sibling helper
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
update_vm
|
python
|
def update_vm(vm_ref, vm_config_spec):
'''
Updates the virtual machine configuration with the given object
vm_ref
Virtual machine managed object reference
vm_config_spec
Virtual machine config spec object to update
'''
vm_name = get_managed_object_name(vm_ref)
log.trace('Updating vm \'%s\'', vm_name)
try:
task = vm_ref.ReconfigVM_Task(vm_config_spec)
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
vm_ref = wait_for_task(task, vm_name, 'ReconfigureVM Task')
return vm_ref
|
Updates the virtual machine configuration with the given object
vm_ref
Virtual machine managed object reference
vm_config_spec
Virtual machine config spec object to update
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L3621-L3647
|
[
"def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):\n '''\n Waits for a task to be completed.\n\n task\n The task to wait for.\n\n instance_name\n The name of the ESXi host, vCenter Server, or Virtual Machine that\n the task is being run on.\n\n task_type\n The type of task being performed. Useful information for debugging purposes.\n\n sleep_seconds\n The number of seconds to wait before querying the task again.\n Defaults to ``1`` second.\n\n log_level\n The level at which to log task information. Default is ``debug``,\n but ``info`` is also supported.\n '''\n time_counter = 0\n start_time = time.time()\n log.trace('task = %s, task_type = %s', task, task.__class__.__name__)\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n while task_info.state == 'running' or task_info.state == 'queued':\n if time_counter % sleep_seconds == 0:\n msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n time.sleep(1.0 - ((time.time() - start_time) % 1.0))\n time_counter += 1\n try:\n task_info = task.info\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. 
Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.RuntimeFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareRuntimeError(exc.msg)\n if task_info.state == 'success':\n msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(\n instance_name, task_type, time_counter)\n if log_level == 'info':\n log.info(msg)\n else:\n log.debug(msg)\n # task is in a successful state\n return task_info.result\n else:\n # task is in an error state\n try:\n raise task_info.error\n except vim.fault.NoPermission as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(\n 'Not enough permissions. Required privilege: '\n '{}'.format(exc.privilegeId))\n except vim.fault.FileNotFound as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareFileNotFoundError(exc.msg)\n except vim.fault.VimFault as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareApiError(exc.msg)\n except vmodl.fault.SystemError as exc:\n log.exception(exc)\n raise salt.exceptions.VMwareSystemError(exc.msg)\n except vmodl.fault.InvalidArgument as exc:\n log.exception(exc)\n exc_message = exc.msg\n if exc.faultMessage:\n exc_message = '{0} ({1})'.format(exc_message,\n exc.faultMessage[0].message)\n raise salt.exceptions.VMwareApiError(exc_message)\n",
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Gate module loading on the availability of pyVmomi.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param cmd: esxcli command and arguments
    :param protocol: Optional protocol, defaults to ``https``
    :param port: TCP port, defaults to ``443``
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file
    :return: Dictionary as returned by ``cmd.run_all``, or False when the
             ``esxcli`` binary is not installed
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    # NOTE(review): the command line is assembled by string interpolation and
    # executed through a shell; host/user/password values containing quotes
    # would break the quoting. Kept as-is for backwards compatibility.
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting through a vCenter: -h targets the specific ESXi host
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    # quiet: the command line embeds the password, so its output is not logged
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    host
        Location of the vCenter server or ESX/ESXi host.
    username / password
        Credentials; both mandatory when ``mechanism`` is ``userpass``.
    protocol / port
        Passed through to ``SmartConnect``.
    mechanism
        Either ``userpass`` or ``sspi``.
    principal / domain
        Kerberos service principal and user domain; both mandatory when
        ``mechanism`` is ``sspi``.

    Fixes: previously used ``exc.message`` (missing on Python 3 exceptions)
    and silently swallowed TypeErrors that did not match the pyVmomi version
    check, which surfaced later as UnboundLocalError.
    '''
    log.trace('Retrieving new service instance')
    token = None
    # Validate that the chosen login mechanism received its mandatory inputs
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # Python 3 exceptions have no 'message' attribute; stringify instead
        err_str = six.text_type(exc)
        if 'unexpected keyword argument' in err_str:
            log.error('Initial connect to the VMware endpoint failed with %s', err_str)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
        # Always re-raise: an unmatched TypeError was previously swallowed,
        # leading to an UnboundLocalError on 'service_instance' below.
        raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                # Retry once with certificate verification disabled
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            if 'certificate verify failed' in six.text_type(exc):
                # Last resort: pin TLSv1 with verification disabled
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is cleanly closed when the interpreter exits
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Get a reference to a VMware customization spec for the purposes of
    customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)
    customization_spec_name
        Name of the customization spec
    '''
    # Use a distinct local name instead of shadowing the parameter
    customization_spec = si.content.customizationSpecManager.GetCustomizationSpec(
        name=customization_spec_name)
    return customization_spec
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get a reference to the first inventory object of the specified type and
    name, or None when nothing matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)
    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)
    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    view = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # First item whose name matches, defaulting to None
    return next((item for item in view.view if item.name == obj_name), None)
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.
    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``
    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``
    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.
    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.
    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.
    principal
        Kerberos service principal. Required if mechanism is ``sspi``
    domain
        Kerberos user domain. Required if mechanism is ``sspi``

    Re-uses pyVim's cached session (``GetSi``) when it points at the same
    host; otherwise reconnects via ``_get_service_instance``.
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443
    # GetSi() returns the process-wide cached service instance, if any
    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
                (hasattr(stub, 'host') and
                 stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance
    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        # CurrentTime() serves as a cheap authenticated liveness probe
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        # Session expired: tear it down and build a fresh connection
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Build a new SOAP stub pointing at a different endpoint path, re-using an
    existing connection's host and session cookie.

    service_instance
        The Service Instance.
    path
        Path of the new stub.
    ns
        Namespace of the new stub. Default value is None.
    version
        Version of the new stub. Default value is None.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rules, so hostname checking and client-side cert
    # verification may need to be turned off.
    ssl_context = None
    if sys.version_info[:3] > (2, 7, 8):
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE
    current_stub = service_instance._stub
    hostname = current_stub.host.split(':')[0]
    # Re-use the authenticated session cookie for the new endpoint
    session_cookie = current_stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=ssl_context)
    new_stub.cookie = current_stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of managed object. This field is optional.
        NOTE(review): the default '<unnamed>' is truthy, so the fallback to
        ``mo_ref.name`` below only fires when an explicitly falsy value
        (e.g. '' or None) is passed in.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    # Build a ServiceInstance that shares the managed object's SOAP stub
    si = vim.ServiceInstance('ServiceInstance')
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Function that disconnects from the vCenter server or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Return True when the connection is made to a vCenter Server and False
    when it is made to an ESXi host.

    service_instance
        The Service Instance from which to obtain managed object references.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('api_type = %s', api_type)
    # Flat guards instead of an if/elif/else ladder
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Returns information of the vCenter or ESXi host

    service_instance
        The Service Instance from which to obtain managed object references.

    Returns the service's ``about`` info object; raises VMwareApiError or
    VMwareRuntimeError on vim/vmodl faults.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to the Distributed Virtual Switch with the given name,
    or None when no such switch exists.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object or None
    '''
    # Only scan the inventory view when the name is known to exist
    if dvs_name in list_dvs(service_instance):
        inventory = get_inventory(service_instance)
        dvs_view = inventory.viewManager.CreateContainerView(
            inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        for dvs in dvs_view.view:
            if dvs.name == dvs_name:
                return dvs
    return None
def _get_pnics(host_reference):
    '''
    Helper function that returns a list of PhysicalNics and their information.

    host_reference
        Host managed object (presumably vim.HostSystem — it exposes
        ``config.network``) whose physical NICs are returned.
    '''
    return host_reference.config.network.pnic
def _get_vnics(host_reference):
    '''
    Helper function that returns a list of VirtualNics and their information.

    host_reference
        Host managed object (presumably vim.HostSystem — it exposes
        ``config.network``) whose virtual NICs are returned.
    '''
    return host_reference.config.network.vnic
def _get_vnic_manager(host_reference):
    '''
    Helper function that returns the host's virtual NIC manager.

    host_reference
        Host managed object (presumably vim.HostSystem — it exposes
        ``configManager``).
    '''
    return host_reference.configManager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
'''
Return a portgroup object corresponding to the portgroup name on the dvs
:param dvs: DVS object
:param portgroup_name: Name of portgroup to return
:return: Portgroup object
'''
for portgroup in dvs.portgroup:
if portgroup.name == portgroup_name:
return portgroup
return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal
    host
        Host url where we would like to authenticate
    domain
        Kerberos user domain

    Returns the base64-encoded token; raises ImportError when gssapi is
    unavailable and CommandExecutionError when no token could be obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')
    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    # NOTE(review): 'in_token' is never reassigned, so this loop effectively
    # performs a single ctx.step(None) exchange: either a token comes back
    # and is returned, or one of the raises below fires.
    while not ctx.established:
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        # in_token is always None here, so a missing token raises immediately
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    # Hardware grains are only gathered when connected directly to an ESXi
    # host (apiType 'HostAgent'); vCenter connections return an empty dict
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # view.view[0] is the single ESXi host behind this HostAgent
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model
            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue
            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # NOTE(review): '/' is integer division on Python 2 ints, true
            # division on Python 3 — mem_total's type differs across versions
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            # Collect per-vnic addressing; IPv6 entries only when configured
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []
                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            # fqdn is host[.domain]; the dot is omitted when domain is empty
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)
            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac
            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # NOTE(review): the container view is only dereferenced here, not
        # Destroy()'d — confirm whether an explicit Destroy is required
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the inventory (the retrieved service content) of a Service
    Instance Object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    return service_instance.RetrieveContent()
def get_root_folder(service_instance):
    '''
    Returns the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises VMwareApiError on permission/API faults and VMwareRuntimeError on
    vmodl runtime faults.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties used to return even more filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)
    # By default, the object reference used as the starting point for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )
    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )
    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )
    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )
    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destroy the object view (only when this function created it above)
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose ``property_name`` matches
    ``property_value`` (the stringified object id is also accepted), or None.

    service_instance
        The Service Instance from which to obtain managed object references.
    object_type
        The type of content for which to obtain managed object references.
    property_value
        The value of the property for which to obtain the managed object
        reference.
    property_name
        An object property used to return the specified object reference
        results. Defaults to ``name``.
    container_ref
        An optional reference to the managed object to search under. Can either
        be an object of type Folder, Datacenter, ComputeResource, Resource Pool
        or HostSystem. If not specified, default behaviour is to search under
        the inventory rootFolder.
    '''
    # Fetch every candidate along with just the property we match on
    candidates = get_mors_with_properties(
        service_instance, object_type,
        property_list=[property_name],
        container_ref=container_ref)
    for candidate in candidates:
        # Strip quote characters from the stringified managed object reference
        candidate_id = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == candidate_id:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Returns a list containing properties and managed object references for the managed object.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter,
        ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec

    local_properties
        Flag specifying whether the properties to be retrieved are local to the
        container. If that is the case, the traversal spec needs to be None.
    '''
    # Get all the content
    content_args = [service_instance, object_type]
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # Connection was dropped mid-request; retry once.
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        # Only a broken pipe is treated as transient and retried once.
        if exc.errno != errno.EPIPE:
            raise exc
        content = get_content(*content_args, **content_kwargs)
    # Flatten each result's propSet into a dict; the managed object reference
    # itself is stored under the 'object' key.
    object_list = []
    for obj in content:
        properties = {}
        for prop in obj.propSet:
            properties[prop.name] = prop.val
        properties['object'] = obj.obj
        object_list.append(properties)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Returns specific properties of a managed object, retrieved in an
    optimal way.

    mo_ref
        The managed object reference.

    properties
        List of properties of the managed object to retrieve.

    Raises a VMwareApiError if the properties could not be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    # First try to resolve the object's name, for logging/error messages only.
    try:
        items = get_mors_with_properties(service_instance,
                                         type(mo_ref),
                                         container_ref=mo_ref,
                                         property_list=['name'],
                                         local_properties=True)
        mo_name = items[0]['name']
    except vmodl.query.InvalidProperty:
        # Some managed objects don't expose a 'name' property.
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    items = get_mors_with_properties(service_instance,
                                     type(mo_ref),
                                     container_ref=mo_ref,
                                     property_list=properties,
                                     local_properties=True)
    if not items:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return items[0]
def get_managed_object_name(mo_ref):
    '''
    Returns the name of a managed object.
    If the name wasn't found, it returns None.

    mo_ref
        The managed object reference.
    '''
    # Delegate property retrieval; .get() yields None if 'name' is absent.
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device object of the given type.

    adapter_type
        The adapter type name: one of ``vmxnet``, ``vmxnet2``, ``vmxnet3``,
        ``e1000`` or ``e1000e``.

    Raises ValueError if the adapter type name is unknown.
    '''
    # Map each supported type name to its pyVmomi device class.
    adapter_classes = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    if adapter_type not in adapter_classes:
        raise ValueError('An unknown network adapter object type name.')
    return adapter_classes[adapter_type]()
def get_network_adapter_object_type(adapter_object):
    '''
    Returns the network adapter type name for an adapter device object.

    adapter_object
        The adapter object from which to obtain the network adapter type.

    Raises ValueError if the object is not a known adapter type.
    '''
    # Order matters: more specific subclasses must be checked before their
    # base classes, so keep vmxnet2/vmxnet3 ahead of vmxnet and e1000e ahead
    # of e1000.
    type_table = (
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    )
    for device_class, type_name in type_table:
        if isinstance(adapter_object, device_class):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Returns distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # DVSs live in the datacenter's network folder; traverse into it and
    # then over its children.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    dvss = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualSwitch,
                                          container_ref=dc_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_dvss or (dvs_names and entry['name'] in dvs_names):
            dvss.append(entry['object'])
    return dvss
def get_network_folder(dc_ref):
    '''
    Retrieves the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises a VMwareObjectRetrievalError if the folder could not be found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    # Traverse directly into the datacenter's 'networkFolder' property.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    entries = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not entries:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return entries[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits
    for the creation task to complete.

    Note: this function does not return the created DVS reference; it
    returns None once the creation task has finished.

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the
        DVS name is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Updates a distributed virtual switch with the config_spec and waits
    for the reconfiguration task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Sets whether NIOC (network I/O control) is enabled on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Returns distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.
    '''
    if not (isinstance(parent_ref,
                       (vim.Datacenter, vim.DistributedVirtualSwitch))):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    properties = ['name']
    # Traversal depends on the parent type: datacenters require descending
    # through the network folder, a DVS exposes its portgroups directly.
    if isinstance(parent_ref, vim.Datacenter):
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:  # parent is distributed virtual switch
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=parent_ref,
                                      property_list=properties,
                                      traversal_spec=traversal_spec)
             if get_all_portgroups or
             (portgroup_names and i['name'] in portgroup_names)]
    return items
def get_uplink_dvportgroup(dvs_ref):
    '''
    Returns the uplink distributed virtual portgroup of a distributed virtual
    switch (dvs).

    dvs_ref
        The dvs reference

    Raises a VMwareObjectRetrievalError if no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    # The uplink portgroup is identified by the system tag
    # 'SYSTEM/DVS.UPLINKPG' on the portgroup.
    items = [entry['object'] for entry in
             get_mors_with_properties(service_instance,
                                      vim.DistributedVirtualPortgroup,
                                      container_ref=dvs_ref,
                                      property_list=['tag'],
                                      traversal_spec=traversal_spec)
             if entry['tag'] and
             [t for t in entry['tag'] if t.key == 'SYSTEM/DVS.UPLINKPG']]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return items[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Creates a distributed virtual portgroup on a distributed virtual switch
    (dvs) and waits for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the
    reconfiguration task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log-message typo: 'portgrouo' -> 'portgroup'
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Removes a distributed virtual portgroup and waits for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Returns networks of standard switches.
    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    # Networks are children of the datacenter's network folder.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    service_instance = get_service_instance_from_managed_object(parent_ref)
    networks = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or (network_names and
                                entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
def list_objects(service_instance, vim_object, properties=None):
    '''
    Returns a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information
        (e.g. vim.Datacenter).

    properties
        An optional list of object properties used to return reference results.
        If not provided, defaults to ``name``.
    '''
    if properties is None:
        properties = ['name']
    # Collect only the 'name' of each matching managed object.
    return [item['name'] for item in
            get_mors_with_properties(service_instance, vim_object, properties)]
def get_license_manager(service_instance):
    '''
    Returns the license manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.
    '''
    log.debug('Retrieving license manager')
    try:
        lic_manager = service_instance.content.licenseManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return lic_manager
def get_license_assignment_manager(service_instance):
    '''
    Returns the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license manager.

    Raises a VMwareObjectRetrievalError if no assignment manager is exposed.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        lic_assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not lic_assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return lic_assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Returns the licenses on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    # The description is stored as the vSphere-client visible label.
    label = vim.KeyValue()
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity_ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging. Mandatory.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # Entity is the vCenter itself; identify it by its instance UUID and
        # later verify the returned assignment matches entity_name.
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId
    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if entity_type == 'uuid' and len(assignments) > 1:
        # Fixed log-message typo: 'Unexpectectedly' -> 'Unexpectedly'
        log.trace('Unexpectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')
    if check_name:
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity and returns the assigned license.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
            get_license_assignment_manager(service_instance)
    entity_id = None
    if not entity_ref:
        # vcenter
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Added log.exception for consistency with the module's other
            # fault handlers.
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId
    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenter names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    return list_objects(service_instance, vim.Datacenter)
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter, optionally filtered by name.

    service_instance
        The Service Instance Object from which to obtain the datacenters.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is None.
    '''
    datacenters = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.Datacenter,
                                          property_list=['name']):
        if get_all_datacenters or (datacenter_names and
                                   entry['name'] in datacenter_names):
            datacenters.append(entry['object'])
    return datacenters
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.

    service_instance
        The Service Instance Object from which to obtain the datacenter.

    datacenter_name
        The datacenter name

    Raises a VMwareObjectRetrievalError if the datacenter is not found.
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter and returns the new vim.Datacenter object.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The name of the cluster to be retrieved

    Raises a VMwareObjectRetrievalError if the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Clusters are children of the datacenter's host folder.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx). Mandatory.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter and waits for the reconfiguration
    task to complete.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx). Mandatory.
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec with the existing configuration.
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of cluster names associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    return list_objects(service_instance, vim.ClusterComputeResource)
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore cluster (StoragePod) names associated with a
    given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    return list_objects(service_instance, vim.StoragePod)
def list_datastores(service_instance):
    '''
    Returns a list of datastore names associated with a given service
    instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return list_objects(service_instance, vim.Datastore)
def list_datastores_full(service_instance):
    '''
    Returns a dict of datastores associated with a given service instance,
    keyed by datastore name. Each value contains basic information about the
    datastore: name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    return {name: list_datastore_full(service_instance, name)
            for name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts.

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises a VMwareObjectRetrievalError if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )
    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # NOTE(review): summary values are presumably bytes, making these MiB —
    # confirm against the vSphere API docs.
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []
    for host in datastore_object.host:
        # host.key renders as "'vim.HostSystem:host-...'"; extract the moid.
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.
    Returns None if no object matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Return the first matching view entry, or None when nothing matches.
    return next((entry for entry in container.view
                 if entry.name == obj_name), None)
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id.
    Returns None if no object matches.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Return the first view entry whose moid matches, or None.
    return next((entry for entry in container.view
                 if entry._moId == obj_moid), None)
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Searches the given datastores for files matching a browser specification.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    results = []
    for ds_ref in get_datastores(service_instance, container_object,
                                 datastore_names=datastores):
        search_path = '[{}] {}'.format(ds_ref.name, directory)
        try:
            task = ds_ref.browser.SearchDatastore_Task(
                datastorePath=search_path, searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            results.append(salt.utils.vmware.wait_for_task(
                task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A datastore that lacks the directory simply contributes
            # no results
            pass
    return results
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
    # Backing-disk filtering is only meaningful for a host reference
    if backing_disk_ids and not isinstance(reference, vim.HostSystem):
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\' when backing disk filter '
            'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = salt.utils.vmware.get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        # Merge the disk-derived names into the explicit name filter
        if datastore_names:
            datastore_names.extend(disk_datastores)
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)
    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name

    Raises VMwareApiError on permission or API faults and VMwareRuntimeError
    on runtime faults.
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Retrieves the vim.HostStorageSystem of an ESXi host.

    service_instance
        The Service Instance Object used to query the host.

    host_ref
        The vim.HostSystem whose storage system is retrieved.

    hostname
        Name of the host; looked up from ``host_ref`` when not provided.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    found = get_mors_with_properties(service_instance,
                                     vim.HostStorageSystem,
                                     property_list=['systemFile'],
                                     container_ref=host_ref,
                                     traversal_spec=spec)
    if not found:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return found[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition informations for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The vim.HostStorageSystem used to query the device.

    device_path
        The path of the device whose partition info is retrieved.
    '''
    try:
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    # Only one device path was queried, so return its single result entry
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem of the host owning the disk.

    device_path
        The path of the device on which the partition is computed.

    partition_info
        The current vim.HostDiskPartitionInfo of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition at the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Use a '%s' placeholder: logging performs lazy %-style interpolation,
    # so the previous '{0}' placeholder was never substituted
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newly
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. Retrieved from ``host_ref`` when
        not provided.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute the spec for a new vmfs partition consuming the free space
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Retrieves the datastore system of an ESXi host.

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.
    '''
    hostname = hostname or get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref)
    spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    found = get_mors_with_properties(si,
                                     vim.HostDatastoreSystem,
                                     property_list=['datastore'],
                                     container_ref=host_ref,
                                     traversal_spec=spec)
    if not found:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return found[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # The removal is issued through the datastore system of the first host
    # the datastore is attached to
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set. This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    host_names = host_names or []
    props = ['name']
    if datacenter_name:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # 'parent' is needed to verify cluster membership below
            props.append('parent')
    else:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=props)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    selected = []
    for host in hosts:
        # A host qualifies by cluster membership first, then by name (or
        # unconditionally when get_all_hosts is set)
        if cluster_name:
            parent = host['parent']
            if not isinstance(parent, vim.ClusterComputeResource):
                continue
            if get_managed_object_name(parent) != cluster_name:
                continue
        if get_all_hosts or host['name'] in host_names:
            selected.append(host['object'])
    return selected
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    # No luns on this host: not an error, just an empty result
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref,
                                                                name=hostname)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    # scsi address -> lun key, then lun key -> lun object
    scsi_addr_to_lun_key = _get_scsi_address_to_lun_key_map(
        service_instance, host_ref, storage_system, hostname)
    lun_by_key = {lun.key: lun
                  for lun in get_all_luns(host_ref, storage_system, hostname)}
    return {scsi_addr: lun_by_key[lun_key]
            for scsi_addr, lun_key in six.iteritems(scsi_addr_to_lun_key)}
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # Translate the requested scsi addresses into lun keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(
            si, host_ref, storage_system, hostname)
        disk_keys = [lun_key
                     for scsi_addr, lun_key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)

    matches = []
    for lun in get_all_luns(host_ref, storage_system):
        if not isinstance(lun, vim.HostScsiDisk):
            continue
        # Keep the disk if everything is requested, or it matches either
        # a canonical name or a key derived from a scsi address
        if (get_all_disks or
                (disk_ids and lun.canonicalName in disk_ids) or
                lun.key in disk_keys):
            matches.append(lun)
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in matches])
    return matches
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are retrieved

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the requested disk among the host's scsi luns
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the requested disk among the host's scsi luns
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # Filter the mappings by their cache (ssd) disk's canonical name
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Verifies that a disk group's cache and capacity disks match the expected
    canonical names; raises ArgumentValueError when they don't.
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    actual_capacity_ids = sorted(d.canonicalName for d in disk_group.nonSsd)
    expected_capacity_ids = sorted(capacity_disk_ids)
    if actual_capacity_ids != expected_capacity_ids:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity_ids, expected_capacity_ids))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, otherwise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if not host_cache_manager:
        # Retrieve the cache configuration via a traversal from the host
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='configManager.cacheConfigurationManager',
            type=vim.HostSystem,
            skip=False)
        results = get_mors_with_properties(service_instance,
                                           vim.HostCacheConfigurationManager,
                                           ['cacheConfigurationInfo'],
                                           container_ref=host_ref,
                                           traversal_spec=traversal_spec)
        if not results or not results[0].get('cacheConfigurationInfo'):
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results[0]['cacheConfigurationInfo'][0]
    else:
        # A cache manager was supplied; query it directly
        results = get_properties_of_managed_object(host_cache_manager,
                                                   ['cacheConfigurationInfo'])
        if not results:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return results['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cache of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore object representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until vCenter reports the configuration task has finished
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Return the names of all ESXi hosts known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    host_list = list_objects(service_instance, vim.HostSystem)
    return host_list
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects.

    service_instance
        The service instance object to query the vCenter.

    resource_pool_names
        List of resource pool names to retrieve. May be None/empty when
        ``get_all_resource_pools`` is True.

    datacenter_name
        Name of the datacenter where the resource pool is available.
        Default is None, in which case the whole inventory is searched.

    get_all_resource_pools
        Boolean; when True, every resource pool under the container is
        returned regardless of ``resource_pool_names``.

    return
        List of vim.ResourcePool managed object references.

    Raises VMwareObjectRetrievalError when no matching pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = []
    for pool in resource_pools:
        if get_all_resource_pools or (pool['name'] in resource_pool_names):
            selected_pools.append(pool['object'])
    if not selected_pools:
        # Report the names that were requested; the original formatted the
        # always-empty result list, producing 'names=[]' in every error.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Return the names of all resource pools known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    pool_list = list_objects(service_instance, vim.ResourcePool)
    return pool_list
def list_networks(service_instance):
    '''
    Return the names of all networks known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    network_list = list_objects(service_instance, vim.Network)
    return network_list
def list_vms(service_instance):
    '''
    Return the names of all virtual machines known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vm_list = list_objects(service_instance, vim.VirtualMachine)
    return vm_list
def list_folders(service_instance):
    '''
    Return the names of all folders known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    folder_list = list_objects(service_instance, vim.Folder)
    return folder_list
def list_dvs(service_instance):
    '''
    Return the names of all distributed virtual switches known to the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    dvs_list = list_objects(service_instance, vim.DistributedVirtualSwitch)
    return dvs_list
def list_vapps(service_instance):
    '''
    Return the names of all vApps known to the given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vapp_list = list_objects(service_instance, vim.VirtualApp)
    return vapp_list
def list_portgroups(service_instance):
    '''
    Return the names of all distributed virtual portgroups known to the given
    service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual portgroups.
    '''
    portgroup_list = list_objects(service_instance, vim.dvs.DistributedVirtualPortgroup)
    return portgroup_list
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        NOTE(review): despite the name, this does not change the poll
        interval (which is ~1 second); it controls how often the waiting
        message is logged (every ``sleep_seconds`` iterations).
        Defaults to ``1``.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.

    Returns ``task_info.result`` on success; re-raises the task's fault,
    translated to a salt VMware exception, on failure.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    while task_info.state == 'running' or task_info.state == 'queued':
        # Only log every sleep_seconds iterations to avoid log spam.
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep until the next whole-second boundary relative to start_time
        # so that time_counter approximates elapsed seconds.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; raise the stored fault so the except
        # clauses below can translate it into the matching salt exception.
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            if exc.faultMessage:
                # Append the first localized fault message for more detail.
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.

    Raises VMwareObjectRetrievalError when no VM matches and
    VMwareMultipleObjectsError when more than one VM carries the name.
    '''
    if datacenter and not parent_ref:
        # Narrow the search to the given datacenter.
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        # The message parts must be separate list items; the original used
        # implicit string concatenation inside the list, so ' '.join received
        # a single element and the error read 'with thesame name'.
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the',
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    Resolution order: the base VM's parent folder (when cloning), then an
    explicit ``placement['folder']``, then the datacenter's vmFolder.

    NOTE(review): if ``base_vm_name`` is None, ``placement`` has no 'folder'
    key and ``datacenter`` is falsy, no branch assigns ``folder_object`` and
    the final return raises UnboundLocalError — TODO confirm callers always
    supply a datacenter.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Cloning: reuse the folder that contains the base VM.
        vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vmFolder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError('The datacenter vm folder object is not defined')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Tuple of (resource pool object, placement object) where the placement
        object is the host or cluster, if any applies.
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter, host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # Standalone hosts expose resourcePool directly; hosts in a
            # cluster do not, so walk up to the cluster's resource pool.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # Fixed: the original formatted placement['host'], which is not
            # present in this branch and raised KeyError instead of the
            # intended error.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a dict of the
    form ``{'size': <int>, 'unit': 'KB'}``.

    unit
        Unit of the size, one of 'GB', 'MB' or 'KB' (case insensitive);
        Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    Raises ArgumentValueError when the unit is not one of the supported ones.
    '''
    if unit.lower() == 'gb':
        # vCenter needs long value
        target_size = int(size * 1024 * 1024)
    elif unit.lower() == 'mb':
        target_size = int(size * 1024)
    elif unit.lower() == 'kb':
        target_size = int(size)
    else:
        # Fixed: the original message claimed the unit was 'not specified'
        # even when an (unsupported) unit was given.
        raise salt.exceptions.ArgumentValueError(
            'The unit \'{0}\' is not supported'.format(unit))
    return {'size': target_size, 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine; either 'on' or 'off'.

    Raises ArgumentValueError for an unsupported action,
    VMwareApiError/VMwareRuntimeError on API faults and VMwarePowerOnError
    when a required file is missing during the power operation.
    Returns the virtual machine object on success.
    '''
    if action == 'on':
        try:
            task = virtual_machine.PowerOn()
            task_name = 'power on'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    elif action == 'off':
        try:
            task = virtual_machine.PowerOff()
            task_name = 'power off'
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    try:
        # Block until the power task completes.
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec.

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    try:
        # The host argument is only passed when a valid HostSystem is given;
        # otherwise vCenter chooses the host from the resource pool.
        if host_object and isinstance(host_object, vim.HostSystem):
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object,
                                               host=host_object)
        else:
            task = folder_object.CreateVM_Task(vm_config_spec,
                                               pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Creation can be slow; log progress every 10s at info level.
    vm_object = wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
    return vm_object
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference.

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path
        Full path to the vmx file, datastore name should be included

    resourcepool_object
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host_object
        Placement host of the virtual machine, vim.HostSystem object (optional)

    Raises VMwareApiError/VMwareRuntimeError on API faults and
    VMwareVmRegisterError when the vmx file cannot be found.
    '''
    try:
        if host_object:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       host=host_object,
                                                       pool=resourcepool_object)
        else:
            task = datacenter.vmFolder.RegisterVM_Task(path=vmx_path, name=name,
                                                       asTemplate=False,
                                                       pool=resourcepool_object)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        # Translate a missing vmx file into a registration-specific error.
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine, deleting it from the inventory and removing
    its files from the datastore.

    vm_ref
        Managed object reference of a virtual machine object

    Raises VMwareApiError/VMwareRuntimeError on API faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Destruction is asynchronous; block until the task finishes.
    wait_for_task(task, vm_name, 'Destroy Task')
def unregister_vm(vm_ref):
    '''
    Unregisters a virtual machine, removing it from the inventory without
    deleting its files from the datastore.

    vm_ref
        Managed object reference of a virtual machine object

    Raises VMwareApiError/VMwareRuntimeError on API faults.
    '''
    vm_name = get_managed_object_name(vm_ref)
    # Fixed: log message said 'Destroying' (copy-paste from delete_vm), but
    # UnregisterVM only removes the VM from the inventory.
    log.trace('Unregistering vm \'%s\'', vm_name)
    try:
        vm_ref.UnregisterVM()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        # log.exception added for consistency with every other handler in
        # this module.
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
saltstack/salt
|
salt/utils/vmware.py
|
unregister_vm
|
python
|
def unregister_vm(vm_ref):
'''
Destroys the virtual machine
vm_ref
Managed object reference of a virtual machine object
'''
vm_name = get_managed_object_name(vm_ref)
log.trace('Destroying vm \'%s\'', vm_name)
try:
vm_ref.UnregisterVM()
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
raise salt.exceptions.VMwareRuntimeError(exc.msg)
|
Destroys the virtual machine
vm_ref
Managed object reference of a virtual machine object
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vmware.py#L3675-L3694
|
[
"def get_managed_object_name(mo_ref):\n '''\n Returns the name of a managed object.\n If the name wasn't found, it returns None.\n\n mo_ref\n The managed object reference.\n '''\n props = get_properties_of_managed_object(mo_ref, ['name'])\n return props.get('name')\n"
] |
# -*- coding: utf-8 -*-
'''
Connection library for VMware
.. versionadded:: 2015.8.2
This is a base library used by a number of VMware services such as VMware
ESX, ESXi, and vCenter servers.
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
:codeauthor: Alexandru Bleotu <alexandru.bleotu@morganstanley.com>
Dependencies
~~~~~~~~~~~~
- pyVmomi Python Module
- ESXCLI: This dependency is only needed to use the ``esxcli`` function. No other
functions in this module rely on ESXCLI.
pyVmomi
-------
PyVmomi can be installed via pip:
.. code-block:: bash
pip install pyVmomi
.. note::
Version 6.0 of pyVmomi has some problems with SSL error handling on certain
versions of Python. If using version 6.0 of pyVmomi, Python 2.6,
Python 2.7.9, or newer must be present. This is due to an upstream dependency
in pyVmomi 6.0 that is not supported in Python versions 2.7 to 2.7.8. If the
version of Python is not in the supported range, you will need to install an
earlier version of pyVmomi. See `Issue #29537`_ for more information.
.. _Issue #29537: https://github.com/saltstack/salt/issues/29537
Based on the note above, to install an earlier version of pyVmomi than the
version currently listed in PyPi, run the following:
.. code-block:: bash
pip install pyVmomi==5.5.0.2014.1.1
The 5.5.0.2014.1.1 is a known stable version that this original VMware utils file
was developed against.
ESXCLI
------
This dependency is only needed to use the ``esxcli`` function. At the time of this
writing, no other functions in this module rely on ESXCLI.
The ESXCLI package is also referred to as the VMware vSphere CLI, or vCLI. VMware
provides vCLI package installation instructions for `vSphere 5.5`_ and
`vSphere 6.0`_.
.. _vSphere 5.5: http://pubs.vmware.com/vsphere-55/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
.. _vSphere 6.0: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.vcli.getstart.doc/cli_install.4.2.html
Once all of the required dependencies are in place and the vCLI package is
installed, you can check to see if you can connect to your ESXi host or vCenter
server by running the following command:
.. code-block:: bash
esxcli -s <host-location> -u <username> -p <password> system syslog config get
If the connection was successful, ESXCLI was successfully installed on your system.
You should see output related to the ESXi host's syslog configuration.
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import atexit
import errno
import logging
import time
import sys
import ssl
# Import Salt Libs
import salt.exceptions
import salt.modules.cmdmod
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
from salt.ext.six.moves.http_client import BadStatusLine # pylint: disable=E0611
try:
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, \
SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
try:
import gssapi
import base64
HAS_GSSAPI = True
except ImportError:
HAS_GSSAPI = False
# Get Logging Started
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load if PyVmomi is installed.
    '''
    if not HAS_PYVMOMI:
        return False, 'Missing dependency: The salt.utils.vmware module requires pyVmomi.'
    return True
def esxcli(host, user, pwd, cmd, protocol=None, port=None, esxi_host=None, credstore=None):
    '''
    Shell out and call the specified esxcli command, parse the result
    and return something sane.

    :param host: ESXi or vCenter host to connect to
    :param user: User to connect as, usually root
    :param pwd: Password to connect with
    :param port: TCP port
    :param cmd: esxcli command and arguments
    :param esxi_host: If `host` is a vCenter host, then esxi_host is the
                      ESXi machine on which to execute this command
    :param credstore: Optional path to the credential store file

    :return: Dictionary

    NOTE(review): the password is interpolated into the command line, so it
    is visible in the process list; the command output is run with
    output_loglevel='quiet' to keep it out of the logs.
    '''
    esx_cmd = salt.utils.path.which('esxcli')
    if not esx_cmd:
        log.error('Missing dependency: The salt.utils.vmware.esxcli function requires ESXCLI.')
        return False
    # Set default port and protocol if none are provided.
    if port is None:
        port = 443
    if protocol is None:
        protocol = 'https'
    if credstore:
        esx_cmd += ' --credstore \'{0}\''.format(credstore)
    if not esxi_host:
        # Then we are connecting directly to an ESXi server,
        # 'host' points at that server, and esxi_host is a reference to the
        # ESXi instance we are manipulating
        esx_cmd += ' -s {0} -u {1} -p \'{2}\' ' \
                   '--protocol={3} --portnumber={4} {5}'.format(host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    else:
        # Connecting through vCenter; -h selects the target ESXi host.
        esx_cmd += ' -s {0} -h {1} -u {2} -p \'{3}\' ' \
                   '--protocol={4} --portnumber={5} {6}'.format(host,
                                                                esxi_host,
                                                                user,
                                                                pwd,
                                                                protocol,
                                                                port,
                                                                cmd)
    ret = salt.modules.cmdmod.run_all(esx_cmd, output_loglevel='quiet')
    return ret
def _get_service_instance(host, username, password, protocol,
                          port, mechanism, principal, domain):
    '''
    Internal method to authenticate with a vCenter server or ESX/ESXi host
    and return the service instance object.

    Supports the 'userpass' and 'sspi' (Kerberos) login mechanisms. On SSL
    certificate verification failures the connection is retried with an
    unverified SSL context.
    '''
    log.trace('Retrieving new service instance')
    token = None
    if mechanism == 'userpass':
        if username is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'username\' is missing')
        if password is None:
            raise salt.exceptions.CommandExecutionError(
                'Login mechanism userpass was specified but the mandatory '
                'parameter \'password\' is missing')
    elif mechanism == 'sspi':
        if principal is not None and domain is not None:
            try:
                token = get_gssapi_token(principal, host, domain)
            except Exception as exc:
                raise salt.exceptions.VMwareConnectionError(six.text_type(exc))
        else:
            err_msg = 'Login mechanism \'{0}\' was specified but the' \
                      ' mandatory parameters are missing'.format(mechanism)
            raise salt.exceptions.CommandExecutionError(err_msg)
    else:
        raise salt.exceptions.CommandExecutionError(
            'Unsupported mechanism: \'{0}\''.format(mechanism))
    try:
        log.trace('Connecting using the \'%s\' mechanism, with username \'%s\'',
                  mechanism, username)
        service_instance = SmartConnect(
            host=host,
            user=username,
            pwd=password,
            protocol=protocol,
            port=port,
            b64token=token,
            mechanism=mechanism)
    except TypeError as exc:
        # TODO(review): exc.message does not exist on Python 3 exceptions;
        # this branch would itself raise AttributeError there — confirm and
        # switch to six.text_type(exc).
        if 'unexpected keyword argument' in exc.message:
            log.error('Initial connect to the VMware endpoint failed with %s', exc.message)
            log.error('This may mean that a version of PyVmomi EARLIER than 6.0.0.2016.6 is installed.')
            log.error('We recommend updating to that version or later.')
            raise
    except Exception as exc:  # pylint: disable=broad-except
        # pyVmomi's SmartConnect() actually raises Exception in some cases.
        default_msg = 'Could not connect to host \'{0}\'. ' \
                      'Please check the debug log for more information.'.format(host)
        try:
            # First retry path: certificate verification failed — retry with
            # an unverified SSL context.
            if (isinstance(exc, vim.fault.HostConnectFault) and
                '[SSL: CERTIFICATE_VERIFY_FAILED]' in exc.msg) or \
               '[SSL: CERTIFICATE_VERIFY_FAILED]' in six.text_type(exc):
                service_instance = SmartConnect(
                    host=host,
                    user=username,
                    pwd=password,
                    protocol=protocol,
                    port=port,
                    sslContext=getattr(ssl, '_create_unverified_context', getattr(ssl, '_create_stdlib_context'))(),
                    b64token=token,
                    mechanism=mechanism)
            else:
                log.exception(exc)
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                raise salt.exceptions.VMwareConnectionError(err_msg)
        except Exception as exc:  # pylint: disable=broad-except
            # pyVmomi's SmartConnect() actually raises Exception in some cases.
            # Second retry path: force TLSv1 with certificate checks disabled.
            if 'certificate verify failed' in six.text_type(exc):
                context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
                context.verify_mode = ssl.CERT_NONE
                try:
                    service_instance = SmartConnect(
                        host=host,
                        user=username,
                        pwd=password,
                        protocol=protocol,
                        port=port,
                        sslContext=context,
                        b64token=token,
                        mechanism=mechanism
                    )
                except Exception as exc:
                    log.exception(exc)
                    err_msg = exc.msg if hasattr(exc, 'msg') else six.text_type(exc)
                    raise salt.exceptions.VMwareConnectionError(
                        'Could not connect to host \'{0}\': '
                        '{1}'.format(host, err_msg))
            else:
                err_msg = exc.msg if hasattr(exc, 'msg') else default_msg
                log.trace(exc)
                raise salt.exceptions.VMwareConnectionError(err_msg)
    # Ensure the session is closed when the process exits.
    atexit.register(Disconnect, service_instance)
    return service_instance
def get_customizationspec_ref(si, customization_spec_name):
    '''
    Return a reference to the named VMware customization spec, for the
    purposes of customizing a clone.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    customization_spec_name
        Name of the customization spec
    '''
    spec_manager = si.content.customizationSpecManager
    customization_spec = spec_manager.GetCustomizationSpec(name=customization_spec_name)
    return customization_spec
def get_mor_using_container_view(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name.

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the first matching managed object, or None if no match is found.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(inventory.rootFolder, [obj_type], True)
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
        return None
    finally:
        # Views are server-side session objects; destroy the view so it does
        # not accumulate on the vCenter/ESXi side (the original leaked it).
        container.DestroyView()
def get_service_instance(host, username=None, password=None, protocol=None,
                         port=None, mechanism='userpass', principal=None,
                         domain=None):
    '''
    Authenticate with a vCenter server or ESX/ESXi host and return the service instance object.

    host
        The location of the vCenter server or ESX/ESXi host.

    username
        The username used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    password
        The password used to login to the vCenter server or ESX/ESXi host.
        Required if mechanism is ``userpass``

    protocol
        Optionally set to alternate protocol if the vCenter server or ESX/ESXi host is not
        using the default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the vCenter server or ESX/ESXi host is not
        using the default port. Default port is ``443``.

    mechanism
        pyVmomi connection mechanism. Can either be ``userpass`` or ``sspi``.
        Default mechanism is ``userpass``.

    principal
        Kerberos service principal. Required if mechanism is ``sspi``

    domain
        Kerberos user domain. Required if mechanism is ``sspi``

    Reuses pyVim's cached service instance when it still points at the same
    host and the session is still authenticated; reconnects otherwise.
    '''
    if protocol is None:
        protocol = 'https'
    if port is None:
        port = 443

    service_instance = GetSi()
    if service_instance:
        stub = GetStub()
        if (salt.utils.platform.is_proxy() or
            (hasattr(stub, 'host') and
             stub.host != ':'.join([host, six.text_type(port)]))):
            # Proxies will fork and mess up the cached service instance.
            # If this is a proxy or we are connecting to a different host
            # invalidate the service instance to avoid a potential memory leak
            # and reconnect
            Disconnect(service_instance)
            service_instance = None
        else:
            return service_instance

    if not service_instance:
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)

    # Test if data can actually be retrieved or connection has gone stale
    log.trace('Checking connection is still authenticated')
    try:
        service_instance.CurrentTime()
    except vim.fault.NotAuthenticated:
        log.trace('Session no longer authenticating. Reconnecting')
        Disconnect(service_instance)
        service_instance = _get_service_instance(host,
                                                 username,
                                                 password,
                                                 protocol,
                                                 port,
                                                 mechanism,
                                                 principal,
                                                 domain)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return service_instance
def get_new_service_instance_stub(service_instance, path, ns=None,
                                  version=None):
    '''
    Returns a stub that points to a different path,
    created from an existing connection.

    service_instance
        The Service Instance.

    path
        Path of the new stub.

    ns
        Namespace of the new stub.
        Default value is None

    version
        Version of the new stub.
        Default value is None.

    The new stub reuses the session cookie of the existing connection, so it
    authenticates as the same session.
    '''
    # For python 2.7.9 and later, the default SSL context has more strict
    # connection handshaking rule. We may need turn off the hostname checking
    # and the client side cert verification.
    context = None
    if sys.version_info[:3] > (2, 7, 8):
        context = ssl.create_default_context()
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE

    # NOTE: relies on pyVmomi private attribute _stub of the existing
    # connection to extract host and session cookie.
    stub = service_instance._stub
    hostname = stub.host.split(':')[0]
    session_cookie = stub.cookie.split('"')[1]
    VmomiSupport.GetRequestContext()['vcSessionCookie'] = session_cookie
    new_stub = SoapStubAdapter(host=hostname,
                               ns=ns,
                               path=path,
                               version=version,
                               poolSize=0,
                               sslContext=context)
    new_stub.cookie = stub.cookie
    return new_stub
def get_service_instance_from_managed_object(mo_ref, name='<unnamed>'):
    '''
    Retrieves the service instance from a managed object.

    mo_ref
        Reference to a managed object (of type vim.ManagedEntity).

    name
        Name of the managed object, used only for logging.
        This field is optional.
    '''
    if not name:
        name = mo_ref.name
    log.trace('[%s] Retrieving service instance from managed object', name)
    si = vim.ServiceInstance('ServiceInstance')
    # Share the managed object's SOAP stub so the service instance uses the
    # same authenticated session.
    si._stub = mo_ref._stub
    return si
def disconnect(service_instance):
    '''
    Disconnect from the vCenter server or ESXi host.

    service_instance
        The Service Instance to disconnect from.

    Raises VMwareApiError or VMwareRuntimeError when the underlying API
    call fails.
    '''
    log.trace('Disconnecting')
    try:
        Disconnect(service_instance)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def is_connection_to_a_vcenter(service_instance):
    '''
    Return True when connected to a vCenter Server, False when connected
    to an ESXi host.

    service_instance
        The Service Instance to inspect.

    Raises VMwareApiError for unexpected api types or permission problems.
    '''
    try:
        api_type = service_instance.content.about.apiType
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    log.trace('api_type = %s', api_type)
    if api_type == 'VirtualCenter':
        return True
    if api_type == 'HostAgent':
        return False
    raise salt.exceptions.VMwareApiError(
        'Unexpected api type \'{0}\' . Supported types: '
        '\'VirtualCenter/HostAgent\''.format(api_type))
def get_service_info(service_instance):
    '''
    Return the AboutInfo of the vCenter or ESXi host.

    service_instance
        The Service Instance from which to obtain the information.

    Raises VMwareApiError or VMwareRuntimeError when the underlying API
    call fails.
    '''
    try:
        return service_instance.content.about
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def _get_dvs(service_instance, dvs_name):
    '''
    Return a reference to a Distributed Virtual Switch object.

    :param service_instance: PyVmomi service instance
    :param dvs_name: Name of DVS to return
    :return: A PyVmomi DVS object, or None when no DVS matches
    '''
    switches = list_dvs(service_instance)
    if dvs_name in switches:
        inventory = get_inventory(service_instance)
        container = inventory.viewManager.CreateContainerView(
            inventory.rootFolder, [vim.DistributedVirtualSwitch], True)
        try:
            for item in container.view:
                if item.name == dvs_name:
                    return item
        finally:
            # Fix: the original leaked the container view; destroy it so
            # the server-side view object is released.
            container.Destroy()
    return None
def _get_pnics(host_reference):
    '''
    Return the host's configured physical NICs
    (``host.config.network.pnic``).
    '''
    network_config = host_reference.config.network
    return network_config.pnic
def _get_vnics(host_reference):
    '''
    Return the host's configured virtual NICs
    (``host.config.network.vnic``).
    '''
    network_config = host_reference.config.network
    return network_config.vnic
def _get_vnic_manager(host_reference):
    '''
    Return the host's virtual NIC manager
    (``host.configManager.virtualNicManager``).
    '''
    config_manager = host_reference.configManager
    return config_manager.virtualNicManager
def _get_dvs_portgroup(dvs, portgroup_name):
    '''
    Return the portgroup with the given name on the DVS, or None when no
    portgroup matches.

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object or None
    '''
    return next(
        (pg for pg in dvs.portgroup if pg.name == portgroup_name),
        None)
def _get_dvs_uplink_portgroup(dvs, portgroup_name):
    '''
    Return a portgroup object corresponding to the portgroup name on the dvs

    NOTE(review): this is currently byte-for-byte identical to
    ``_get_dvs_portgroup`` -- the match is purely by name and the search is
    NOT restricted to uplink portgroups. Confirm whether uplink filtering
    was intended.

    :param dvs: DVS object
    :param portgroup_name: Name of portgroup to return
    :return: Portgroup object, or None when no portgroup matches
    '''
    for portgroup in dvs.portgroup:
        if portgroup.name == portgroup_name:
            return portgroup

    return None
def get_gssapi_token(principal, host, domain):
    '''
    Get the gssapi token for Kerberos connection

    principal
        The service principal

    host
        Host url where we would like to authenticate

    domain
        Kerberos user domain

    Returns the base64-encoded token. Raises ImportError when the gssapi
    library is unavailable and CommandExecutionError when no token can be
    obtained.
    '''
    if not HAS_GSSAPI:
        raise ImportError('The gssapi library is not imported.')

    service = '{0}/{1}@{2}'.format(principal, host, domain)
    log.debug('Retrieving gsspi token for service %s', service)
    service_name = gssapi.Name(service, gssapi.C_NT_USER_NAME)
    ctx = gssapi.InitContext(service_name)
    in_token = None
    while not ctx.established:
        # Single-step negotiation: the first outgoing token is encoded and
        # returned immediately. in_token is never fed back from a server
        # response, so multi-round negotiation is not supported here -- a
        # step that yields no token raises instead.
        out_token = ctx.step(in_token)
        if out_token:
            if six.PY2:
                return base64.b64encode(out_token)
            return base64.b64encode(salt.utils.stringutils.to_bytes(out_token))
        if ctx.established:
            break
        if not in_token:
            raise salt.exceptions.CommandExecutionError(
                'Can\'t receive token, no response from server')
    raise salt.exceptions.CommandExecutionError(
        'Context established, but didn\'t receive token')
def get_hardware_grains(service_instance):
    '''
    Return hardware info for standard minion grains if the service_instance is a HostAgent type

    service_instance
        The service instance object to get hardware info for

    Returns an empty dict when the connection is not to an ESXi host
    (apiType != 'HostAgent').

    .. versionadded:: 2016.11.0
    '''
    hw_grain_data = {}
    if get_inventory(service_instance).about.apiType == 'HostAgent':
        view = service_instance.content.viewManager.CreateContainerView(service_instance.RetrieveContent().rootFolder,
                                                                        [vim.HostSystem], True)
        if view and view.view:
            # System / BIOS / OS identification grains
            hw_grain_data['manufacturer'] = view.view[0].hardware.systemInfo.vendor
            hw_grain_data['productname'] = view.view[0].hardware.systemInfo.model

            for _data in view.view[0].hardware.systemInfo.otherIdentifyingInfo:
                if _data.identifierType.key == 'ServiceTag':
                    hw_grain_data['serialnumber'] = _data.identifierValue

            hw_grain_data['osfullname'] = view.view[0].summary.config.product.fullName
            hw_grain_data['osmanufacturer'] = view.view[0].summary.config.product.vendor
            hw_grain_data['osrelease'] = view.view[0].summary.config.product.version
            hw_grain_data['osbuild'] = view.view[0].summary.config.product.build
            hw_grain_data['os_family'] = view.view[0].summary.config.product.name
            hw_grain_data['os'] = view.view[0].summary.config.product.name
            # memorySize is in bytes; convert to MiB
            hw_grain_data['mem_total'] = view.view[0].hardware.memorySize /1024/1024
            hw_grain_data['biosversion'] = view.view[0].hardware.biosInfo.biosVersion
            hw_grain_data['biosreleasedate'] = view.view[0].hardware.biosInfo.releaseDate.date().strftime('%m/%d/%Y')
            hw_grain_data['cpu_model'] = view.view[0].hardware.cpuPkg[0].description
            hw_grain_data['kernel'] = view.view[0].summary.config.product.productLineId
            hw_grain_data['num_cpu_sockets'] = view.view[0].hardware.cpuInfo.numCpuPackages
            hw_grain_data['num_cpu_cores'] = view.view[0].hardware.cpuInfo.numCpuCores
            hw_grain_data['num_cpus'] = hw_grain_data['num_cpu_sockets'] * hw_grain_data['num_cpu_cores']
            # Network grains: virtual NICs first (IP + MAC), then physical
            # NICs (MAC only)
            hw_grain_data['ip_interfaces'] = {}
            hw_grain_data['ip4_interfaces'] = {}
            hw_grain_data['ip6_interfaces'] = {}
            hw_grain_data['hwaddr_interfaces'] = {}
            for _vnic in view.view[0].configManager.networkSystem.networkConfig.vnic:
                hw_grain_data['ip_interfaces'][_vnic.device] = []
                hw_grain_data['ip4_interfaces'][_vnic.device] = []
                hw_grain_data['ip6_interfaces'][_vnic.device] = []

                hw_grain_data['ip_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                hw_grain_data['ip4_interfaces'][_vnic.device].append(_vnic.spec.ip.ipAddress)
                if _vnic.spec.ip.ipV6Config:
                    hw_grain_data['ip6_interfaces'][_vnic.device].append(_vnic.spec.ip.ipV6Config.ipV6Address)
                hw_grain_data['hwaddr_interfaces'][_vnic.device] = _vnic.spec.mac
            hw_grain_data['host'] = view.view[0].configManager.networkSystem.dnsConfig.hostName
            hw_grain_data['domain'] = view.view[0].configManager.networkSystem.dnsConfig.domainName
            hw_grain_data['fqdn'] = '{0}{1}{2}'.format(
                view.view[0].configManager.networkSystem.dnsConfig.hostName,
                ('.' if view.view[0].configManager.networkSystem.dnsConfig.domainName else ''),
                view.view[0].configManager.networkSystem.dnsConfig.domainName)

            for _pnic in view.view[0].configManager.networkSystem.networkInfo.pnic:
                hw_grain_data['hwaddr_interfaces'][_pnic.device] = _pnic.mac

            hw_grain_data['timezone'] = view.view[0].configManager.dateTimeSystem.dateTimeInfo.timeZone.name
        # NOTE(review): the container view is only dereferenced, not
        # explicitly destroyed via DestroyView.
        view = None
    return hw_grain_data
def get_inventory(service_instance):
    '''
    Return the full content (inventory) of a Service Instance object.

    service_instance
        The Service Instance Object for which to obtain inventory.
    '''
    return service_instance.RetrieveContent()
def get_root_folder(service_instance):
    '''
    Return the root folder of a vCenter.

    service_instance
        The Service Instance Object for which to obtain the root folder.

    Raises VMwareApiError or VMwareRuntimeError when the underlying API
    call fails.
    '''
    try:
        log.trace('Retrieving root folder')
        return service_instance.RetrieveContent().rootFolder
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_content(service_instance, obj_type, property_list=None,
                container_ref=None, traversal_spec=None,
                local_properties=False):
    '''
    Returns the content of the specified type of object for a Service Instance.

    For more information, please see:
    http://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch5_PropertyCollector.7.6.html

    service_instance
        The Service Instance from which to obtain content.

    obj_type
        The type of content to obtain.

    property_list
        An optional list of object properties to used to return even more
        filtered content results.

    container_ref
        An optional reference to the managed object to search under. Can
        either be an object of type Folder, Datacenter, ComputeResource,
        Resource Pool or HostSystem. If not specified, default behaviour is
        to search under the inventory rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.

    Returns the list of ObjectContent entries produced by the
    PropertyCollector.
    '''
    # Start at the rootFolder if container starting point not specified
    if not container_ref:
        container_ref = get_root_folder(service_instance)

    # By default, the object reference used as the starting poing for the filter
    # is the container_ref passed in the function
    obj_ref = container_ref
    local_traversal_spec = False
    if not traversal_spec and not local_properties:
        local_traversal_spec = True
        # We don't have a specific traversal spec override so we are going to
        # get everything using a container view
        try:
            obj_ref = service_instance.content.viewManager.CreateContainerView(
                container_ref, [obj_type], True)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

        # Create 'Traverse All' traversal spec to determine the path for
        # collection
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='traverseEntities',
            path='view',
            skip=False,
            type=vim.view.ContainerView
        )

    # Create property spec to determine properties to be retrieved
    property_spec = vmodl.query.PropertyCollector.PropertySpec(
        type=obj_type,
        all=True if not property_list else False,
        pathSet=property_list
    )

    # Create object spec to navigate content
    obj_spec = vmodl.query.PropertyCollector.ObjectSpec(
        obj=obj_ref,
        skip=True if not local_properties else False,
        selectSet=[traversal_spec] if not local_properties else None
    )

    # Create a filter spec and specify object, property spec in it
    filter_spec = vmodl.query.PropertyCollector.FilterSpec(
        objectSet=[obj_spec],
        propSet=[property_spec],
        reportMissingObjectsInResults=False
    )

    # Retrieve the contents
    try:
        content = service_instance.content.propertyCollector.RetrieveContents([filter_spec])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # Destroy the object view created above; only needed when this function
    # built its own container view
    if local_traversal_spec:
        try:
            obj_ref.Destroy()
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)

    return content
def get_mor_by_property(service_instance, object_type, property_value, property_name='name', container_ref=None):
    '''
    Return the first managed object reference whose ``property_name``
    property (or whose stringified MOID) equals ``property_value``; None
    when there is no match.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_value
        The value to match against.

    property_name
        An object property used to return the specified object reference
        results. Defaults to ``name``.

    container_ref
        An optional reference to the managed object to search under.
    '''
    candidates = get_mors_with_properties(
        service_instance,
        object_type,
        property_list=[property_name],
        container_ref=container_ref)
    for candidate in candidates:
        # MOIDs stringify as e.g. 'vim.HostSystem:"host-12"'; strip quotes
        # so a raw MOID can be matched too.
        moid = six.text_type(candidate.get('object', '')).strip('\'"')
        if candidate[property_name] == property_value or property_value == moid:
            return candidate['object']
    return None
def get_mors_with_properties(service_instance, object_type, property_list=None,
                             container_ref=None, traversal_spec=None,
                             local_properties=False):
    '''
    Return a list of dicts, one per managed object, mapping each retrieved
    property name to its value, plus an ``object`` key holding the managed
    object reference itself.

    service_instance
        The Service Instance from which to obtain managed object references.

    object_type
        The type of content for which to obtain managed object references.

    property_list
        An optional list of object properties used to return even more
        filtered managed object reference results.

    container_ref
        An optional reference to the managed object to search under. If not
        specified, default behaviour is to search under the inventory
        rootFolder.

    traversal_spec
        An optional TraversalSpec to be used instead of the standard
        ``Traverse All`` spec.

    local_properties
        Flag specifying whether the properties to be retrieved are local to
        the container. If that is the case, the traversal spec needs to be
        None.
    '''
    content_args = (service_instance, object_type)
    content_kwargs = {'property_list': property_list,
                      'container_ref': container_ref,
                      'traversal_spec': traversal_spec,
                      'local_properties': local_properties}
    try:
        content = get_content(*content_args, **content_kwargs)
    except BadStatusLine:
        # A stale HTTP connection can surface as BadStatusLine; retry once.
        content = get_content(*content_args, **content_kwargs)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise exc
        # Broken pipe -- retry once on a fresh request.
        content = get_content(*content_args, **content_kwargs)

    object_list = []
    for obj in content:
        entry = {prop.name: prop.val for prop in obj.propSet}
        entry['object'] = obj.obj
        object_list.append(entry)
    log.trace('Retrieved %s objects', len(object_list))
    return object_list
def get_properties_of_managed_object(mo_ref, properties):
    '''
    Return the requested properties of a managed object as a dict.

    mo_ref
        The managed object reference.

    properties
        List of property names of the managed object to retrieve.

    Raises VMwareApiError when no properties could be retrieved.
    '''
    service_instance = get_service_instance_from_managed_object(mo_ref)
    log.trace('Retrieving name of %s', type(mo_ref).__name__)
    try:
        name_entries = get_mors_with_properties(service_instance,
                                                type(mo_ref),
                                                container_ref=mo_ref,
                                                property_list=['name'],
                                                local_properties=True)
        mo_name = name_entries[0]['name']
    except vmodl.query.InvalidProperty:
        # Not all managed objects expose a 'name' property.
        mo_name = '<unnamed>'
    log.trace('Retrieving properties \'%s\' of %s \'%s\'',
              properties, type(mo_ref).__name__, mo_name)
    entries = get_mors_with_properties(service_instance,
                                       type(mo_ref),
                                       container_ref=mo_ref,
                                       property_list=properties,
                                       local_properties=True)
    if not entries:
        raise salt.exceptions.VMwareApiError(
            'Properties of managed object \'{0}\' weren\'t '
            'retrieved'.format(mo_name))
    return entries[0]
def get_managed_object_name(mo_ref):
    '''
    Return the name of a managed object, or None when it has no name.

    mo_ref
        The managed object reference.
    '''
    return get_properties_of_managed_object(mo_ref, ['name']).get('name')
def get_network_adapter_type(adapter_type):
    '''
    Return a new virtual network adapter device of the requested type.

    adapter_type
        One of 'vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e'.

    Raises ValueError for unknown type names.
    '''
    factories = {
        'vmxnet': vim.vm.device.VirtualVmxnet,
        'vmxnet2': vim.vm.device.VirtualVmxnet2,
        'vmxnet3': vim.vm.device.VirtualVmxnet3,
        'e1000': vim.vm.device.VirtualE1000,
        'e1000e': vim.vm.device.VirtualE1000e,
    }
    factory = factories.get(adapter_type)
    if factory is None:
        raise ValueError('An unknown network adapter object type name.')
    return factory()
def get_network_adapter_object_type(adapter_object):
    '''
    Return the string type of a virtual network adapter device.

    adapter_object
        The adapter device instance from which to obtain the network
        adapter type.

    Raises ValueError for unknown adapter classes.
    '''
    # Order preserved from the original implementation: more specific
    # classes are tested before more general ones (e.g. vmxnet2/vmxnet3
    # before vmxnet).
    ordered_checks = [
        (vim.vm.device.VirtualVmxnet2, 'vmxnet2'),
        (vim.vm.device.VirtualVmxnet3, 'vmxnet3'),
        (vim.vm.device.VirtualVmxnet, 'vmxnet'),
        (vim.vm.device.VirtualE1000e, 'e1000e'),
        (vim.vm.device.VirtualE1000, 'e1000'),
    ]
    for adapter_cls, type_name in ordered_checks:
        if isinstance(adapter_object, adapter_cls):
            return type_name
    raise ValueError('An unknown network adapter object type.')
def get_dvss(dc_ref, dvs_names=None, get_all_dvss=False):
    '''
    Return the distributed virtual switches (DVSs) in a datacenter.

    dc_ref
        The parent datacenter reference.

    dvs_names
        The names of the DVSs to return. Default is None.

    get_all_dvss
        Return all DVSs in the datacenter. Default is False.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace(
        'Retrieving DVSs in datacenter \'%s\', dvs_names=\'%s\', get_all_dvss=%s',
        dc_name,
        ','.join(dvs_names) if dvs_names else None,
        get_all_dvss
    )
    # Traverse datacenter -> networkFolder -> childEntity to reach the DVSs.
    folder_traversal = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity',
        skip=False,
        type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[folder_traversal])
    service_instance = get_service_instance_from_managed_object(dc_ref)
    entries = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualSwitch,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    return [entry['object'] for entry in entries
            if get_all_dvss or (dvs_names and entry['name'] in dvs_names)]
def get_network_folder(dc_ref):
    '''
    Return the network folder of a datacenter.

    dc_ref
        The datacenter reference.

    Raises VMwareObjectRetrievalError when the folder cannot be retrieved.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving network folder in datacenter \'%s\'', dc_name)
    service_instance = get_service_instance_from_managed_object(dc_ref)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=False,
        type=vim.Datacenter)
    folders = get_mors_with_properties(service_instance,
                                       vim.Folder,
                                       container_ref=dc_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    if not folders:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Network folder in datacenter \'{0}\' wasn\'t retrieved'
            ''.format(dc_name))
    return folders[0]['object']
def create_dvs(dc_ref, dvs_name, dvs_create_spec=None):
    '''
    Creates a distributed virtual switch (DVS) in a datacenter and waits
    for the creation task to complete.

    Note: despite what earlier documentation claimed, this function does
    NOT return the newly created DVS reference; it returns None (the value
    of the final ``wait_for_task`` call is discarded).

    dc_ref
        The parent datacenter reference.

    dvs_name
        The name of the DVS to create.

    dvs_create_spec
        The DVS spec (vim.DVSCreateSpec) to use when creating the DVS.
        Default is None, in which case a minimal spec carrying only the
        name is built.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating DVS \'%s\' in datacenter \'%s\'', dvs_name, dc_name)
    if not dvs_create_spec:
        dvs_create_spec = vim.DVSCreateSpec()
    if not dvs_create_spec.configSpec:
        dvs_create_spec.configSpec = vim.VMwareDVSConfigSpec()
        dvs_create_spec.configSpec.name = dvs_name
    netw_folder_ref = get_network_folder(dc_ref)
    try:
        task = netw_folder_ref.CreateDVS_Task(dvs_create_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvs(dvs_ref, dvs_config_spec):
    '''
    Apply a config spec to an existing distributed virtual switch and wait
    for the reconfigure task to complete.

    dvs_ref
        The DVS reference.

    dvs_config_spec
        The updated config spec (vim.VMwareDVSConfigSpec) to be applied to
        the DVS.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Updating dvs \'%s\'', dvs_name)
    try:
        task = dvs_ref.ReconfigureDvs_Task(dvs_config_spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def set_dvs_network_resource_management_enabled(dvs_ref, enabled):
    '''
    Enable or disable network resource management (NIOC) on a DVS.

    dvs_ref
        The DVS reference.

    enabled
        Flag specifying whether NIOC is enabled.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Setting network resource management enable to %s on '
              'dvs \'%s\'', enabled, dvs_name)
    try:
        dvs_ref.EnableNetworkResourceManagement(enable=enabled)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_dvportgroups(parent_ref, portgroup_names=None,
                     get_all_portgroups=False):
    '''
    Return distributed virtual portgroups (dvportgroups).
    The parent object can be either a datacenter or a dvs.

    parent_ref
        The parent object reference. Can be either a datacenter or a dvs.

    portgroup_names
        The names of the portgroups to return. Default is None.

    get_all_portgroups
        Return all portgroups in the parent. Default is False.

    Raises ArgumentValueError for unsupported parent types.
    '''
    if not isinstance(parent_ref,
                      (vim.Datacenter, vim.DistributedVirtualSwitch)):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be either a datacenter, '
            'or a distributed virtual switch')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving portgroup in %s \'%s\', portgroups_names=\'%s\', '
              'get_all_portgroups=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(portgroup_names) if portgroup_names else None,
              get_all_portgroups)
    if isinstance(parent_ref, vim.Datacenter):
        # Datacenter parent: traverse networkFolder -> childEntity.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='networkFolder',
            skip=True,
            type=vim.Datacenter,
            selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                path='childEntity',
                skip=False,
                type=vim.Folder)])
    else:
        # DVS parent: portgroups hang directly off the 'portgroup' property.
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='portgroup',
            skip=False,
            type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    entries = get_mors_with_properties(service_instance,
                                       vim.DistributedVirtualPortgroup,
                                       container_ref=parent_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    return [entry['object'] for entry in entries
            if get_all_portgroups or
            (portgroup_names and entry['name'] in portgroup_names)]
def get_uplink_dvportgroup(dvs_ref):
    '''
    Return the uplink distributed virtual portgroup of a distributed
    virtual switch (dvs).

    dvs_ref
        The dvs reference

    Raises VMwareObjectRetrievalError when no uplink portgroup is found.
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Retrieving uplink portgroup of dvs \'%s\'', dvs_name)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='portgroup',
        skip=False,
        type=vim.DistributedVirtualSwitch)
    service_instance = get_service_instance_from_managed_object(dvs_ref)
    uplink_pgs = []
    for entry in get_mors_with_properties(service_instance,
                                          vim.DistributedVirtualPortgroup,
                                          container_ref=dvs_ref,
                                          property_list=['tag'],
                                          traversal_spec=traversal_spec):
        # Uplink portgroups carry the SYSTEM/DVS.UPLINKPG tag.
        tags = entry['tag']
        if tags and any(t.key == 'SYSTEM/DVS.UPLINKPG' for t in tags):
            uplink_pgs.append(entry['object'])
    if not uplink_pgs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Uplink portgroup of DVS \'{0}\' wasn\'t found'.format(dvs_name))
    return uplink_pgs[0]
def create_dvportgroup(dvs_ref, spec):
    '''
    Create a distributed virtual portgroup on a distributed virtual switch
    (dvs) and wait for the creation task to complete.

    dvs_ref
        The dvs reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    dvs_name = get_managed_object_name(dvs_ref)
    log.trace('Adding portgroup %s to dvs \'%s\'', spec.name, dvs_name)
    log.trace('spec = %s', spec)
    try:
        task = dvs_ref.CreateDVPortgroup_Task(spec)
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, dvs_name, six.text_type(task.__class__))
def update_dvportgroup(portgroup_ref, spec):
    '''
    Updates a distributed virtual portgroup and waits for the reconfigure
    task to complete.

    portgroup_ref
        The portgroup reference

    spec
        Portgroup spec (vim.DVPortgroupConfigSpec)
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    # Fixed log-message typo: 'portgrouo' -> 'portgroup'
    log.trace('Updating portgroup %s', pg_name)
    try:
        task = portgroup_ref.ReconfigureDVPortgroup_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def remove_dvportgroup(portgroup_ref):
    '''
    Remove a distributed virtual portgroup and wait for the destroy task
    to complete.

    portgroup_ref
        The portgroup reference
    '''
    pg_name = get_managed_object_name(portgroup_ref)
    log.trace('Removing portgroup %s', pg_name)
    try:
        task = portgroup_ref.Destroy_Task()
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    wait_for_task(task, pg_name, six.text_type(task.__class__))
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''
    Return standard-switch networks under a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicating whether to return all networks in the parent.
        Default is False.

    Raises ArgumentValueError when the parent is not a datacenter.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError(
            'Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__,
              parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> childEntity to reach networks.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    entries = get_mors_with_properties(service_instance,
                                       vim.Network,
                                       container_ref=parent_ref,
                                       property_list=['name'],
                                       traversal_spec=traversal_spec)
    return [entry['object'] for entry in entries
            if get_all_networks or
            (network_names and entry['name'] in network_names)]
def list_objects(service_instance, vim_object, properties=None):
    '''
    Return a simple list of object names from a given service instance.

    service_instance
        The Service Instance for which to obtain a list of objects.

    vim_object
        The type of content for which to obtain information.

    properties
        An optional list of object properties used to return reference
        results. If not provided, defaults to ``name``.
    '''
    requested = ['name'] if properties is None else properties
    return [entry['name'] for entry in
            get_mors_with_properties(service_instance, vim_object, requested)]
def get_license_manager(service_instance):
    '''
    Return the license manager of the vCenter/ESXi host.

    service_instance
        The Service Instance Object from which to obtain the license
        manager.
    '''
    log.debug('Retrieving license manager')
    try:
        return service_instance.content.licenseManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def get_license_assignment_manager(service_instance):
    '''
    Return the license assignment manager.

    service_instance
        The Service Instance Object from which to obtain the license
        assignment manager.

    Raises VMwareObjectRetrievalError when the manager is not available.
    '''
    log.debug('Retrieving license assignment manager')
    try:
        assignment_manager = \
            service_instance.content.licenseManager.licenseAssignmentManager
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
    if not assignment_manager:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'License assignment manager was not retrieved')
    return assignment_manager
def get_licenses(service_instance, license_manager=None):
    '''
    Return the licenses installed on a specific instance.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_manager
        The License Manager object of the service instance. If not
        provided it will be retrieved.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    log.debug('Retrieving licenses')
    try:
        return license_manager.licenses
    except vim.fault.NoPermission as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(err.privilegeId))
    except vim.fault.VimFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareApiError(err.msg)
    except vmodl.RuntimeFault as err:
        log.exception(err)
        raise salt.exceptions.VMwareRuntimeError(err.msg)
def add_license(service_instance, key, description, license_manager=None):
    '''
    Adds a license.

    service_instance
        The Service Instance Object.

    key
        The key of the license to add.

    description
        The description of the license to add.

    license_manager
        The License Manager object of the service instance. If not provided it
        will be retrieved.

    Returns the license object returned by the ``AddLicense`` call.
    '''
    if not license_manager:
        license_manager = get_license_manager(service_instance)
    label = vim.KeyValue()
    # Attach the description as a label under the 'VpxClientLicenseLabel' key
    label.key = 'VpxClientLicenseLabel'
    label.value = description
    log.debug('Adding license \'%s\'', description)
    try:
        vmware_license = license_manager.AddLicense(key, [label])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def get_assigned_licenses(service_instance, entity_ref=None, entity_name=None,
                          license_assignment_manager=None):
    '''
    Returns the licenses assigned to an entity. If entity ref is not provided,
    then entity_name is assumed to be the vcenter. This is later checked if
    the entity name is provided.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    entity_ref
        VMware entity to get the assigned licenses for.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved.
        Default is None.

    Returns the list of assigned license objects.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    if not entity_name:
        raise salt.exceptions.ArgumentValueError('No entity_name passed')
    # If entity_ref is not defined, then interested in the vcenter
    entity_id = None
    entity_type = 'moid'
    check_name = False
    if not entity_ref:
        # The vCenter is queried by its instance UUID; remember to verify the
        # entity name against the assignment's display name afterwards
        if entity_name:
            check_name = True
        entity_type = 'uuid'
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    else:
        entity_id = entity_ref._moId

    log.trace('Retrieving licenses assigned to \'%s\'', entity_name)
    try:
        assignments = \
            license_assignment_manager.QueryAssignedLicenses(entity_id)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)

    # A uuid (vCenter) query is expected to return exactly one assignment
    if entity_type == 'uuid' and len(assignments) > 1:
        log.trace('Unexpectectedly retrieved more than one'
                  ' VCenter license assignment.')
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Unexpected return. Expect only a single assignment')

    if check_name:
        # Guard against querying a different vCenter than the one requested
        if entity_name != assignments[0].entityDisplayName:
            log.trace('Getting license info for wrong vcenter: %s != %s',
                      entity_name, assignments[0].entityDisplayName)
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Got license assignment info for a different vcenter')
    return [a.assignedLicense for a in assignments]
def assign_license(service_instance, license_key, license_name,
                   entity_ref=None, entity_name=None,
                   license_assignment_manager=None):
    '''
    Assigns a license to an entity.

    service_instance
        The Service Instance Object from which to obtain the licenses.

    license_key
        The key of the license to add.

    license_name
        The description of the license to add.

    entity_ref
        VMware entity to assign the license to.
        If None, the entity is the vCenter itself.
        Default is None.

    entity_name
        Entity name used in logging.
        Default is None.

    license_assignment_manager
        The LicenseAssignmentManager object of the service instance.
        If not provided it will be retrieved
        Default is None.

    Returns the license object returned by ``UpdateAssignedLicense``.
    '''
    if not license_assignment_manager:
        license_assignment_manager = \
                get_license_assignment_manager(service_instance)
    entity_id = None

    if not entity_ref:
        # No entity reference given: the target is the vCenter itself,
        # identified by its instance UUID
        try:
            entity_id = service_instance.content.about.instanceUuid
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{0}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            # Fix: log the fault before translating it, consistent with all
            # other exception handlers in this module
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        if not entity_name:
            entity_name = 'vCenter'
    else:
        # e.g. vsan cluster or host
        entity_id = entity_ref._moId

    log.trace('Assigning license to \'%s\'', entity_name)
    try:
        vmware_license = license_assignment_manager.UpdateAssignedLicense(
            entity_id,
            license_key,
            license_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return vmware_license
def list_datacenters(service_instance):
    '''
    Returns a list of datacenters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datacenters.
    '''
    datacenters = list_objects(service_instance, vim.Datacenter)
    return datacenters
def get_datacenters(service_instance, datacenter_names=None,
                    get_all_datacenters=False):
    '''
    Returns all datacenters in a vCenter.

    service_instance
        The Service Instance Object from which to obtain cluster.

    datacenter_names
        List of datacenter names to filter by. Default value is None.

    get_all_datacenters
        Flag specifying whether to retrieve all datacenters.
        Default value is False.
    '''
    # Retrieve only the 'name' property and filter client-side, unless
    # get_all_datacenters is set
    items = [i['object'] for i in
             get_mors_with_properties(service_instance,
                                      vim.Datacenter,
                                      property_list=['name'])
             if get_all_datacenters or
             (datacenter_names and i['name'] in datacenter_names)]
    return items
def get_datacenter(service_instance, datacenter_name):
    '''
    Returns a vim.Datacenter managed object.

    service_instance
        The Service Instance Object from which to obtain datacenter.

    datacenter_name
        The datacenter name
    '''
    matches = get_datacenters(service_instance,
                              datacenter_names=[datacenter_name])
    if not matches:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datacenter \'{0}\' was not found'.format(datacenter_name))
    return matches[0]
def create_datacenter(service_instance, datacenter_name):
    '''
    Creates a datacenter.

    .. versionadded:: 2017.7.0

    service_instance
        The Service Instance Object

    datacenter_name
        The datacenter name

    Returns the object returned by ``CreateDatacenter``.
    '''
    root_folder = get_root_folder(service_instance)
    log.trace('Creating datacenter \'%s\'', datacenter_name)
    try:
        dc_obj = root_folder.CreateDatacenter(datacenter_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return dc_obj
def get_cluster(dc_ref, cluster):
    '''
    Returns a cluster in a datacenter.

    dc_ref
        The datacenter reference

    cluster
        The cluster to be retrieved

    Raises ``VMwareObjectRetrievalError`` if the cluster is not found.
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Retrieving cluster \'%s\' from datacenter \'%s\'',
              cluster, dc_name)
    si = get_service_instance_from_managed_object(dc_ref, name=dc_name)
    # Traverse datacenter -> hostFolder -> childEntity to reach the clusters
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='hostFolder',
        skip=True,
        type=vim.Datacenter,
        selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            skip=False,
            type=vim.Folder)])
    items = [i['object'] for i in
             get_mors_with_properties(si,
                                      vim.ClusterComputeResource,
                                      container_ref=dc_ref,
                                      property_list=['name'],
                                      traversal_spec=traversal_spec)
             if i['name'] == cluster]
    if not items:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Cluster \'{0}\' was not found in datacenter '
            '\'{1}\''. format(cluster, dc_name))
    return items[0]
def create_cluster(dc_ref, cluster_name, cluster_spec):
    '''
    Creates a cluster in a datacenter.

    dc_ref
        The parent datacenter reference.

    cluster_name
        The cluster name.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
    '''
    dc_name = get_managed_object_name(dc_ref)
    log.trace('Creating cluster \'%s\' in datacenter \'%s\'',
              cluster_name, dc_name)
    try:
        dc_ref.hostFolder.CreateClusterEx(cluster_name, cluster_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def update_cluster(cluster_ref, cluster_spec):
    '''
    Updates a cluster in a datacenter.

    cluster_ref
        The cluster reference.

    cluster_spec
        The cluster spec (vim.ClusterConfigSpecEx).
    '''
    cluster_name = get_managed_object_name(cluster_ref)
    log.trace('Updating cluster \'%s\'', cluster_name)
    try:
        # modify=True merges the spec into the existing configuration
        task = cluster_ref.ReconfigureComputeResource_Task(cluster_spec,
                                                           modify=True)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the reconfigure task completes
    wait_for_task(task, cluster_name, 'ClusterUpdateTask')
def list_clusters(service_instance):
    '''
    Returns a list of clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain clusters.
    '''
    clusters = list_objects(service_instance, vim.ClusterComputeResource)
    return clusters
def list_datastore_clusters(service_instance):
    '''
    Returns a list of datastore clusters associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastore clusters.
    '''
    storage_pods = list_objects(service_instance, vim.StoragePod)
    return storage_pods
def list_datastores(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    datastores = list_objects(service_instance, vim.Datastore)
    return datastores
def list_datastores_full(service_instance):
    '''
    Returns a list of datastores associated with a given service instance.
    The list contains basic information about the datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.
    '''
    # Build a mapping of datastore name -> detailed info dict
    return {ds_name: list_datastore_full(service_instance, ds_name)
            for ds_name in list_objects(service_instance, vim.Datastore)}
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.

    Raises ``VMwareObjectRetrievalError`` if the datastore does not exist.
    '''
    datastore_object = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not datastore_object:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )

    items = {}
    items['name'] = str(datastore_object.summary.name).replace("'", "")
    items['type'] = str(datastore_object.summary.type).replace("'", "")
    items['url'] = str(datastore_object.summary.url).replace("'", "")
    # Convert raw summary byte counts to MiB
    items['capacity'] = datastore_object.summary.capacity / 1024 / 1024
    items['free'] = datastore_object.summary.freeSpace / 1024 / 1024
    items['used'] = items['capacity'] - items['free']
    # Guard against a zero-capacity datastore (e.g. inaccessible); the
    # unguarded division previously raised ZeroDivisionError
    if items['capacity']:
        items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    else:
        items['usage'] = 0.0
    items['hosts'] = []

    for host in datastore_object.host:
        # host.key stringifies as '<type>:<moid>'; keep only the moid part
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)

    return items
def get_mor_by_name(si, obj_type, obj_name):
    '''
    Get reference to an object of specified object type and name

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_name
        Name of the object

    Returns the matching managed object reference, or None if not found.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Destroy the server-side view in all cases; otherwise every call leaks
    # a ContainerView on the vCenter/ESXi server
    try:
        for item in container.view:
            if item.name == obj_name:
                return item
    finally:
        container.Destroy()
    return None
def get_mor_by_moid(si, obj_type, obj_moid):
    '''
    Get reference to an object of specified object type and id

    si
        ServiceInstance for the vSphere or ESXi server (see get_service_instance)

    obj_type
        Type of the object (vim.StoragePod, vim.Datastore, etc)

    obj_moid
        ID of the object

    Returns the matching managed object reference, or None if not found.
    '''
    inventory = get_inventory(si)
    container = inventory.viewManager.CreateContainerView(
        inventory.rootFolder, [obj_type], True)
    # Destroy the server-side view in all cases; otherwise every call leaks
    # a ContainerView on the vCenter/ESXi server
    try:
        for item in container.view:
            if item._moId == obj_moid:
                return item
    finally:
        container.Destroy()
    return None
def get_datastore_files(service_instance, directory, datastores, container_object, browser_spec):
    '''
    Get the files with a given browser specification from the datastore.

    service_instance
        The Service Instance Object from which to obtain datastores.

    directory
        The name of the directory where we would like to search

    datastores
        Name of the datastores

    container_object
        The base object for searches

    browser_spec
        BrowserSpec object which defines the search criteria

    return
        list of vim.host.DatastoreBrowser.SearchResults objects
    '''
    files = []
    datastore_objects = get_datastores(service_instance, container_object, datastore_names=datastores)
    for datobj in datastore_objects:
        try:
            task = datobj.browser.SearchDatastore_Task(datastorePath='[{}] {}'.format(datobj.name, directory),
                                                       searchSpec=browser_spec)
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
        try:
            # Call the module-level helper directly; the previous
            # salt.utils.vmware.wait_for_task self-reference was inconsistent
            # with the rest of this module
            files.append(wait_for_task(task, directory, 'query virtual machine files'))
        except salt.exceptions.VMwareFileNotFoundError:
            # A path missing on one datastore is not fatal; skip it
            pass
    return files
def get_datastores(service_instance, reference, datastore_names=None,
                   backing_disk_ids=None, get_all_datastores=False):
    '''
    Returns a list of vim.Datastore objects representing the datastores visible
    from a VMware object, filtered by their names, or the backing disk
    cannonical name or scsi_addresses

    service_instance
        The Service Instance Object from which to obtain datastores.

    reference
        The VMware object from which the datastores are visible.

    datastore_names
        The list of datastore names to be retrieved. Default value is None.

    backing_disk_ids
        The list of canonical names of the disks backing the datastores
        to be retrieved. Only supported if reference is a vim.HostSystem.
        Default value is None

    get_all_datastores
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    obj_name = get_managed_object_name(reference)
    if get_all_datastores:
        log.trace('Retrieving all datastores visible to \'%s\'', obj_name)
    else:
        log.trace('Retrieving datastores visible to \'%s\': names = (%s); '
                  'backing disk ids = (%s)',
                  obj_name, datastore_names, backing_disk_ids)
        if backing_disk_ids and not isinstance(reference, vim.HostSystem):
            raise salt.exceptions.ArgumentValueError(
                'Unsupported reference type \'{0}\' when backing disk filter '
                'is set'.format(reference.__class__.__name__))
    if (not get_all_datastores) and backing_disk_ids:
        # At this point we know the reference is a vim.HostSystem
        log.trace('Filtering datastores with backing disk ids: %s',
                  backing_disk_ids)
        storage_system = get_storage_system(service_instance, reference,
                                            obj_name)
        props = get_properties_of_managed_object(
            storage_system, ['fileSystemVolumeInfo.mountInfo'])
        mount_infos = props.get('fileSystemVolumeInfo.mountInfo', [])
        disk_datastores = []
        # Non vmfs volumes aren't backed by a disk
        for vol in [i.volume for i in mount_infos if
                    isinstance(i.volume, vim.HostVmfsVolume)]:
            if not [e for e in vol.extent if e.diskName in backing_disk_ids]:
                # Skip volume if it doesn't contain an extent with a
                # canonical name of interest
                continue
            log.trace('Found datastore \'%s\' for disk id(s) \'%s\'',
                      vol.name, [e.diskName for e in vol.extent])
            disk_datastores.append(vol.name)
        log.trace('Datastore found for disk filter: %s', disk_datastores)
        if datastore_names:
            # Build a new list instead of extending in place, so the caller's
            # datastore_names list is not mutated as a side effect
            datastore_names = datastore_names + disk_datastores
        else:
            datastore_names = disk_datastores
    if (not get_all_datastores) and (not datastore_names):
        log.trace('No datastore to be filtered after retrieving the datastores '
                  'backed by the disk id(s) \'%s\'', backing_disk_ids)
        return []
    log.trace('datastore_names = %s', datastore_names)

    # Use the default traversal spec
    if isinstance(reference, vim.HostSystem):
        # Create a different traversal spec for hosts because it looks like the
        # default doesn't retrieve the datastores
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='host_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.HostSystem)
    elif isinstance(reference, vim.ClusterComputeResource):
        # Traversal spec for clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='cluster_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.ClusterComputeResource)
    elif isinstance(reference, vim.Datacenter):
        # Traversal spec for datacenter
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datacenter_datastore_traversal',
            path='datastore',
            skip=False,
            type=vim.Datacenter)
    elif isinstance(reference, vim.StoragePod):
        # Traversal spec for datastore clusters
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            name='datastore_cluster_traversal',
            path='childEntity',
            skip=False,
            type=vim.StoragePod)
    elif isinstance(reference, vim.Folder) and \
            get_managed_object_name(reference) == 'Datacenters':
        # Traversal of root folder (doesn't support multiple levels of Folders)
        traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
            path='childEntity',
            selectSet=[
                vmodl.query.PropertyCollector.TraversalSpec(
                    path='datastore',
                    skip=False,
                    type=vim.Datacenter)],
            skip=False,
            type=vim.Folder)
    else:
        raise salt.exceptions.ArgumentValueError(
            'Unsupported reference type \'{0}\''
            ''.format(reference.__class__.__name__))

    items = get_mors_with_properties(service_instance,
                                     object_type=vim.Datastore,
                                     property_list=['name'],
                                     container_ref=reference,
                                     traversal_spec=traversal_spec)
    log.trace('Retrieved %s datastores', len(items))
    items = [i for i in items if get_all_datastores or i['name'] in
             datastore_names]
    log.trace('Filtered datastores: %s', [i['name'] for i in items])
    return [i['object'] for i in items]
def rename_datastore(datastore_ref, new_datastore_name):
    '''
    Renames a datastore.

    datastore_ref
        vim.Datastore reference to the datastore object to be changed

    new_datastore_name
        New datastore name
    '''
    ds_name = get_managed_object_name(datastore_ref)
    log.trace("Renaming datastore '%s' to '%s'", ds_name, new_datastore_name)
    try:
        datastore_ref.RenameDatastore(new_datastore_name)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
def get_storage_system(service_instance, host_ref, hostname=None):
    '''
    Returns a host's storage system.

    service_instance
        The Service Instance Object from which to obtain the storage system.

    host_ref
        Reference to the ESXi host.

    hostname
        Name of the host. This argument is optional.

    Raises ``VMwareObjectRetrievalError`` if the storage system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    # Traverse from the host to its configManager.storageSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostStorageSystem,
                                    property_list=['systemFile'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved storage system', hostname)
    return objs[0]['object']
def _get_partition_info(storage_system, device_path):
    '''
    Returns partition information for a device path, of type
    vim.HostDiskPartitionInfo

    storage_system
        The vim.HostStorageSystem used to query the partition layout.

    device_path
        Path of the device whose partition info is retrieved.
    '''
    try:
        partition_infos = \
            storage_system.RetrieveDiskPartitionInfo(
                devicePath=[device_path])
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('partition_info = %s', partition_infos[0])
    # Only a single device path was queried, so return the single result
    return partition_infos[0]
def _get_new_computed_partition_spec(storage_system,
                                     device_path,
                                     partition_info):
    '''
    Computes the new disk partition info when adding a new vmfs partition that
    uses up the remainder of the disk; returns a tuple
    (new_partition_number, vim.HostDiskPartitionSpec)

    storage_system
        The vim.HostStorageSystem used to compute the partition layout.

    device_path
        Path of the device on which the partition is added.

    partition_info
        Current vim.HostDiskPartitionInfo of the device.
    '''
    log.trace('Adding a partition at the end of the disk and getting the new '
              'computed partition spec')
    # TODO implement support for multiple partitions
    # We support adding a partition add the end of the disk with partitions
    free_partitions = [p for p in partition_info.layout.partition
                       if p.type == 'none']
    if not free_partitions:
        raise salt.exceptions.VMwareObjectNotFoundError(
            'Free partition was not found on device \'{0}\''
            ''.format(partition_info.deviceName))
    free_partition = free_partitions[0]

    # Create a layout object that copies the existing one
    layout = vim.HostDiskPartitionLayout(
        total=partition_info.layout.total,
        partition=partition_info.layout.partition)
    # Create a partition with the free space on the disk
    # Change the free partition type to vmfs
    free_partition.type = 'vmfs'
    try:
        computed_partition_info = storage_system.ComputeDiskPartitionInfo(
            devicePath=device_path,
            partitionFormat=vim.HostDiskPartitionInfoPartitionFormat.gpt,
            layout=layout)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Fix: use %-style lazy formatting; the previous '{0}' placeholder is not
    # expanded by the logging module, so the value was never logged
    log.trace('computed partition info = %s', computed_partition_info)
    log.trace('Retrieving new partition number')
    partition_numbers = [p.partition for p in
                         computed_partition_info.layout.partition
                         if (p.start.block == free_partition.start.block or
                             # XXX If the entire disk is free (i.e. the free
                             # disk partition starts at block 0) the newily
                             # created partition is created from block 1
                             (free_partition.start.block == 0 and
                              p.start.block == 1)) and
                         p.end.block == free_partition.end.block and
                         p.type == 'vmfs']
    if not partition_numbers:
        raise salt.exceptions.VMwareNotFoundError(
            'New partition was not found in computed partitions of device '
            '\'{0}\''.format(partition_info.deviceName))
    log.trace('new partition number = %s', partition_numbers[0])
    return (partition_numbers[0], computed_partition_info.spec)
def create_vmfs_datastore(host_ref, datastore_name, disk_ref,
                          vmfs_major_version, storage_system=None):
    '''
    Creates a VMFS datastore from a disk_id

    host_ref
        vim.HostSystem object referencing a host to create the datastore on

    datastore_name
        Name of the datastore

    disk_ref
        vim.HostScsiDislk on which the datastore is created

    vmfs_major_version
        VMFS major version to use

    storage_system
        The host's vim.HostStorageSystem. If not provided it will be
        retrieved. Default is None.

    Returns the created datastore reference.
    '''
    # TODO Support variable sized partitions
    hostname = get_managed_object_name(host_ref)
    disk_id = disk_ref.canonicalName
    log.debug('Creating datastore \'%s\' on host \'%s\', scsi disk \'%s\', '
              'vmfs v%s', datastore_name, hostname, disk_id, vmfs_major_version)
    if not storage_system:
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)

    target_disk = disk_ref
    partition_info = _get_partition_info(storage_system,
                                         target_disk.devicePath)
    log.trace('partition_info = %s', partition_info)
    # Compute a spec that adds a vmfs partition covering the disk's free space
    new_partition_number, partition_spec = _get_new_computed_partition_spec(
        storage_system,
        target_disk.devicePath,
        partition_info
    )
    spec = vim.VmfsDatastoreCreateSpec(
        vmfs=vim.HostVmfsSpec(
            majorVersion=vmfs_major_version,
            volumeName=datastore_name,
            extent=vim.HostScsiDiskPartition(
                diskName=disk_id,
                partition=new_partition_number)),
        diskUuid=target_disk.uuid,
        partition=partition_spec)
    try:
        ds_ref = \
            host_ref.configManager.datastoreSystem.CreateVmfsDatastore(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.debug('Created datastore \'%s\' on host \'%s\'', datastore_name, hostname)
    return ds_ref
def get_host_datastore_system(host_ref, hostname=None):
    '''
    Returns a host's datastore system.

    host_ref
        Reference to the ESXi host

    hostname
        Name of the host. This argument is optional.

    Raises ``VMwareObjectRetrievalError`` if the datastore system cannot be
    retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    # Traverse from the host to its configManager.datastoreSystem property
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.datastoreSystem',
        type=vim.HostSystem,
        skip=False)
    objs = get_mors_with_properties(service_instance,
                                    vim.HostDatastoreSystem,
                                    property_list=['datastore'],
                                    container_ref=host_ref,
                                    traversal_spec=traversal_spec)
    if not objs:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' datastore system was not retrieved'
            ''.format(hostname))
    log.trace('[%s] Retrieved datastore system', hostname)
    return objs[0]['object']
def remove_datastore(service_instance, datastore_ref):
    '''
    Removes a datastore, using the datastore system of the first host the
    datastore is attached to.

    service_instance
        The Service Instance Object containing the datastore

    datastore_ref
        The reference to the datastore to remove

    Raises ``VMwareApiError`` if the datastore has no attached hosts.
    '''
    ds_props = get_properties_of_managed_object(
        datastore_ref, ['host', 'info', 'name'])
    ds_name = ds_props['name']
    log.debug('Removing datastore \'%s\'', ds_name)
    ds_hosts = ds_props.get('host')
    if not ds_hosts:
        raise salt.exceptions.VMwareApiError(
            'Datastore \'{0}\' can\'t be removed. No '
            'attached hosts found'.format(ds_name))
    # The removal is performed through the first attached host
    hostname = get_managed_object_name(ds_hosts[0].key)
    host_ds_system = get_host_datastore_system(ds_hosts[0].key,
                                               hostname=hostname)
    try:
        host_ds_system.RemoveDatastore(datastore_ref)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Removed datastore \'%s\'', hostname, ds_name)
def get_hosts(service_instance, datacenter_name=None, host_names=None,
              cluster_name=None, get_all_hosts=False):
    '''
    Returns a list of vim.HostSystem objects representing ESXi hosts
    in a vcenter filtered by their names and/or datacenter, cluster membership.

    service_instance
        The Service Instance Object from which to obtain the hosts.

    datacenter_name
        The datacenter name. Default is None.

    host_names
        The host_names to be retrieved. Default is None.

    cluster_name
        The cluster name - used to restrict the hosts retrieved. Only used if
        the datacenter is set.  This argument is optional.

    get_all_hosts
        Specifies whether to retrieve all hosts in the container.
        Default value is False.
    '''
    properties = ['name']
    if cluster_name and not datacenter_name:
        raise salt.exceptions.ArgumentValueError(
            'Must specify the datacenter when specifying the cluster')
    if not host_names:
        host_names = []
    if not datacenter_name:
        # Assume the root folder is the starting point
        start_point = get_root_folder(service_instance)
    else:
        start_point = get_datacenter(service_instance, datacenter_name)
        if cluster_name:
            # Retrieval to test if cluster exists. Cluster existence only makes
            # sense if the datacenter has been specified
            properties.append('parent')

    # Search for the objects
    hosts = get_mors_with_properties(service_instance,
                                     vim.HostSystem,
                                     container_ref=start_point,
                                     property_list=properties)
    log.trace('Retrieved hosts: %s', [h['name'] for h in hosts])
    filtered_hosts = []
    for h in hosts:
        # Complex conditions checking if a host should be added to the
        # filtered list (either due to its name and/or cluster membership)
        if cluster_name:
            # Only hosts parented by the requested cluster qualify
            if not isinstance(h['parent'], vim.ClusterComputeResource):
                continue
            parent_name = get_managed_object_name(h['parent'])
            if parent_name != cluster_name:
                continue

        if get_all_hosts:
            filtered_hosts.append(h['object'])
            continue

        if h['name'] in host_names:
            filtered_hosts.append(h['object'])
    return filtered_hosts
def _get_scsi_address_to_lun_key_map(service_instance,
                                     host_ref,
                                     storage_system=None,
                                     hostname=None):
    '''
    Returns a map between the scsi addresses and the keys of all luns on an ESXi
    host.
        map[<scsi_address>] = <lun key>

    service_instance
        The Service Instance Object from which to obtain the hosts

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. Default is None.

    Raises ``VMwareObjectRetrievalError`` if the storage device info,
    multipath info or luns cannot be retrieved.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device '
            'info was not retrieved'.format(hostname))
    multipath_info = device_info.multipathInfo
    if not multipath_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' multipath info was not retrieved'
            ''.format(hostname))
    if multipath_info.lun is None:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No luns were retrieved from host \'{0}\''.format(hostname))
    lun_key_by_scsi_addr = {}
    for l in multipath_info.lun:
        # The vmware scsi_address may have multiple comma separated values
        # The first one is the actual scsi address
        lun_key_by_scsi_addr.update({p.name.split(',')[0]: l.lun
                                     for p in l.path})
    log.trace('Scsi address to lun id map on host \'%s\': %s',
              hostname, lun_key_by_scsi_addr)
    return lun_key_by_scsi_addr
def get_all_luns(host_ref, storage_system=None, hostname=None):
    '''
    Returns a list of all vim.HostScsiDisk objects in a disk

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        # No storage system given: look it up via a service instance bound
        # to the host reference.
        si = get_service_instance_from_managed_object(host_ref, name=hostname)
        storage_system = get_storage_system(si, host_ref, hostname)
        if not storage_system:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host\'s \'{0}\' storage system was not retrieved'
                ''.format(hostname))
    try:
        device_info = storage_system.storageDeviceInfo
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not device_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' storage device info was not retrieved'
            ''.format(hostname))
    scsi_luns = device_info.scsiLun
    if scsi_luns:
        log.trace('Retrieved scsi luns in host \'%s\': %s',
                  hostname, [l.canonicalName for l in scsi_luns])
        return scsi_luns
    # An empty lun list is not an error; callers filter the result themselves.
    log.trace('Retrieved no scsi_luns in host \'%s\'', hostname)
    return []
def get_scsi_address_to_lun_map(host_ref, storage_system=None, hostname=None):
    '''
    Returns a map of all vim.ScsiLun objects on a ESXi host keyed by their
    scsi address

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    storage_system
        The host's storage system. Default is None.

    hostname
        Name of the host. This argument is optional.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    if not storage_system:
        storage_system = get_storage_system(si, host_ref, hostname)
    # scsi address -> lun key
    key_by_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                   storage_system, hostname)
    # lun key -> vim.ScsiLun object
    lun_by_key = {}
    for lun in get_all_luns(host_ref, storage_system, hostname):
        lun_by_key[lun.key] = lun
    # Compose the two maps: scsi address -> vim.ScsiLun object
    addr_to_lun = {}
    for scsi_addr, lun_key in six.iteritems(key_by_addr):
        addr_to_lun[scsi_addr] = lun_by_key[lun_key]
    return addr_to_lun
def get_disks(host_ref, disk_ids=None, scsi_addresses=None,
              get_all_disks=False):
    '''
    Returns a list of vim.HostScsiDisk objects representing disks
    in a ESXi host, filtered by their cannonical names and scsi_addresses

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    disk_ids
        The list of canonical names of the disks to be retrieved. Default value
        is None

    scsi_addresses
        The list of scsi addresses of the disks to be retrieved. Default value
        is None

    get_all_disks
        Specifies whether to retrieve all disks in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disks:
        log.trace('Retrieving all disks in host \'%s\'', hostname)
    else:
        log.trace('Retrieving disks in host \'%s\': ids = (%s); scsi '
                  'addresses = (%s)', hostname, disk_ids, scsi_addresses)
        # No filters supplied: return early without contacting the server.
        if not (disk_ids or scsi_addresses):
            return []
    si = get_service_instance_from_managed_object(host_ref, name=hostname)
    storage_system = get_storage_system(si, host_ref, hostname)
    disk_keys = []
    if scsi_addresses:
        # convert the scsi addresses to disk keys
        lun_key_by_scsi_addr = _get_scsi_address_to_lun_key_map(si, host_ref,
                                                                storage_system,
                                                                hostname)
        disk_keys = [key for scsi_addr, key
                     in six.iteritems(lun_key_by_scsi_addr)
                     if scsi_addr in scsi_addresses]
        log.trace('disk_keys based on scsi_addresses = %s', disk_keys)
    scsi_luns = get_all_luns(host_ref, storage_system)
    # A disk matches if it is a HostScsiDisk AND (everything was requested,
    # OR its canonical name was requested, OR its key maps to a requested
    # scsi address).
    scsi_disks = [disk for disk in scsi_luns
                  if isinstance(disk, vim.HostScsiDisk) and (
                      get_all_disks or
                      # Filter by canonical name
                      (disk_ids and (disk.canonicalName in disk_ids)) or
                      # Filter by disk keys from scsi addresses
                      (disk.key in disk_keys))]
    log.trace('Retrieved disks in host \'%s\': %s',
              hostname, [d.canonicalName for d in scsi_disks])
    return scsi_disks
def get_disk_partition_info(host_ref, disk_id, storage_system=None):
    '''
    Returns all partitions on a disk

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be retrieved

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    props = get_properties_of_managed_object(storage_system,
                                             ['storageDeviceInfo.scsiLun'])
    if not props.get('storageDeviceInfo.scsiLun'):
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No devices were retrieved in host \'{0}\''.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(props['storageDeviceInfo.scsiLun']),
        ', '.join([l.canonicalName
                   for l in props['storageDeviceInfo.scsiLun']])
    )
    # Locate the requested disk among the host's scsi luns by canonical name.
    disks = [l for l in props['storageDeviceInfo.scsiLun']
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # _get_partition_info (defined elsewhere in this module) queries the
    # storage system for the partition layout of the device path.
    partition_info = _get_partition_info(storage_system, disks[0].devicePath)
    log.trace('[%s] Retrieved %s partition(s) on disk \'%s\'',
              hostname, len(partition_info.spec.partition), disk_id)
    return partition_info
def erase_disk_partitions(service_instance, host_ref, disk_id,
                          hostname=None, storage_system=None):
    '''
    Erases all partitions on a disk by applying an empty partition spec.

    service_instance
        The Service Instance Object from which to obtain all information

    host_ref
        The reference of the ESXi host containing the disk

    disk_id
        The canonical name of the disk whose partitions are to be removed

    hostname
        The ESXi hostname. Default is None.

    storage_system
        The ESXi host's storage system. Default is None.
    '''
    if not hostname:
        hostname = get_managed_object_name(host_ref)
    if not storage_system:
        storage_system = get_storage_system(service_instance, host_ref,
                                            hostname)

    # Fetch the host's scsi luns via the storage system reachable from the
    # host's configManager.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.storageSystem',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostStorageSystem,
                                       ['storageDeviceInfo.scsiLun'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Host\'s \'{0}\' devices were not retrieved'.format(hostname))
    log.trace(
        '[%s] Retrieved %s devices: %s',
        hostname,
        len(results[0].get('storageDeviceInfo.scsiLun', [])),
        ', '.join([l.canonicalName for l in
                   results[0].get('storageDeviceInfo.scsiLun', [])])
    )
    # Locate the requested disk by canonical name.
    disks = [l for l in results[0].get('storageDeviceInfo.scsiLun', [])
             if isinstance(l, vim.HostScsiDisk) and
             l.canonicalName == disk_id]
    if not disks:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Disk \'{0}\' was not found in host \'{1}\''
            ''.format(disk_id, hostname))
    log.trace('[%s] device_path = %s', hostname, disks[0].devicePath)
    # Erase the partitions by setting an empty partition spec
    try:
        storage_system.UpdateDiskPartitions(disks[0].devicePath,
                                            vim.HostDiskPartitionSpec())
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    log.trace('[%s] Erased partitions on disk \'%s\'', hostname, disk_id)
def get_diskgroups(host_ref, cache_disk_ids=None, get_all_disk_groups=False):
    '''
    Returns a list of vim.VsanHostDiskMapping objects representing disks
    in a ESXi host, filtered by their cannonical names.

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    cache_disk_ids
        The list of cannonical names of the cache disks to be retrieved. The
        canonical name of the cache disk is enough to identify the disk group
        because it is guaranteed to have one and only one cache disk.
        Default is None.

    get_all_disk_groups
        Specifies whether to retrieve all disks groups in the host.
        Default value is False.
    '''
    hostname = get_managed_object_name(host_ref)
    if get_all_disk_groups:
        log.trace('Retrieving all disk groups on host \'%s\'', hostname)
    else:
        log.trace('Retrieving disk groups from host \'%s\', with cache disk '
                  'ids : (%s)', hostname, cache_disk_ids)
        # No cache disks requested: nothing to filter on, return early.
        if not cache_disk_ids:
            return []
    try:
        vsan_host_config = host_ref.config.vsanHostConfig
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if not vsan_host_config:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No host config found on host \'{0}\''.format(hostname))
    vsan_storage_info = vsan_host_config.storageInfo
    if not vsan_storage_info:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'No vsan storage info found on host \'{0}\''.format(hostname))
    vsan_disk_mappings = vsan_storage_info.diskMapping
    if not vsan_disk_mappings:
        return []
    # Each disk mapping has exactly one cache (ssd) disk, so the cache disk's
    # canonical name identifies the disk group.
    disk_groups = [dm for dm in vsan_disk_mappings if
                   (get_all_disk_groups or
                    (dm.ssd.canonicalName in cache_disk_ids))]
    log.trace(
        'Retrieved disk groups on host \'%s\', with cache disk ids : %s',
        hostname, [d.ssd.canonicalName for d in disk_groups]
    )
    return disk_groups
def _check_disks_in_diskgroup(disk_group, cache_disk_id, capacity_disk_ids):
    '''
    Checks that the disks in a disk group are as expected and raises
    CheckError exceptions if the check fails
    '''
    actual_cache_id = disk_group.ssd.canonicalName
    if actual_cache_id != cache_disk_id:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect diskgroup cache disk; got id: \'{0}\'; expected id: '
            '\'{1}\''.format(actual_cache_id, cache_disk_id))
    # Capacity disks are compared order-insensitively.
    actual_capacity = sorted([d.canonicalName for d in disk_group.nonSsd])
    expected_capacity = sorted(capacity_disk_ids)
    if actual_capacity != expected_capacity:
        raise salt.exceptions.ArgumentValueError(
            'Incorrect capacity disks; got ids: \'{0}\'; expected ids: \'{1}\''
            ''.format(actual_capacity, expected_capacity))
    log.trace('Checked disks in diskgroup with cache disk id \'%s\'',
              cache_disk_id)
    return True
# TODO Support host caches on multiple datastores
def get_host_cache(host_ref, host_cache_manager=None):
    '''
    Returns a vim.HostScsiDisk if the host cache is configured on the specified
    host, other wise returns None

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    service_instance = get_service_instance_from_managed_object(host_ref)
    log.trace('Retrieving the host cache on host \'%s\'', hostname)
    if host_cache_manager:
        # A cache manager was supplied: read its property directly.
        props = get_properties_of_managed_object(host_cache_manager,
                                                 ['cacheConfigurationInfo'])
        if not props:
            log.trace('Host \'%s\' has no host cache', hostname)
            return None
        return props['cacheConfigurationInfo'][0]
    # No cache manager supplied: traverse from the host to its cache
    # configuration manager and fetch the same property.
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='configManager.cacheConfigurationManager',
        type=vim.HostSystem,
        skip=False)
    results = get_mors_with_properties(service_instance,
                                       vim.HostCacheConfigurationManager,
                                       ['cacheConfigurationInfo'],
                                       container_ref=host_ref,
                                       traversal_spec=traversal_spec)
    if not results or not results[0].get('cacheConfigurationInfo'):
        log.trace('Host \'%s\' has no host cache', hostname)
        return None
    return results[0]['cacheConfigurationInfo'][0]
# TODO Support host caches on multiple datastores
def configure_host_cache(host_ref, datastore_ref, swap_size_MiB,
                         host_cache_manager=None):
    '''
    Configures the host cahe of the specified host

    host_ref
        The vim.HostSystem object representing the host that contains the
        requested disks.

    datastore_ref
        The vim.Datastore opject representing the datastore the host cache will
        be configured on.

    swap_size_MiB
        The size in Mibibytes of the swap.

    host_cache_manager
        The vim.HostCacheConfigurationManager object representing the cache
        configuration manager on the specified host. Default is None. If None,
        it will be retrieved in the method
    '''
    hostname = get_managed_object_name(host_ref)
    if not host_cache_manager:
        # Look up the cache configuration manager from the host's
        # configManager when one was not supplied.
        props = get_properties_of_managed_object(
            host_ref, ['configManager.cacheConfigurationManager'])
        if not props.get('configManager.cacheConfigurationManager'):
            raise salt.exceptions.VMwareObjectRetrievalError(
                'Host \'{0}\' has no host cache'.format(hostname))
        host_cache_manager = props['configManager.cacheConfigurationManager']
    log.trace('Configuring the host cache on host \'%s\', datastore \'%s\', '
              'swap size=%s MiB', hostname, datastore_ref.name, swap_size_MiB)

    spec = vim.HostCacheConfigurationSpec(
        datastore=datastore_ref,
        swapSize=swap_size_MiB)
    log.trace('host_cache_spec=%s', spec)
    try:
        task = host_cache_manager.ConfigureHostCache_Task(spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{0}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the configuration task completes on the host.
    wait_for_task(task, hostname, 'HostCacheConfigurationTask')
    log.trace('Configured host cache on host \'%s\'', hostname)
    return True
def list_hosts(service_instance):
    '''
    Returns a list of hosts associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain hosts.
    '''
    host_type = vim.HostSystem
    return list_objects(service_instance, host_type)
def get_resource_pools(service_instance, resource_pool_names, datacenter_name=None,
                       get_all_resource_pools=False):
    '''
    Retrieves resource pool objects

    service_instance
        The service instance object to query the vCenter

    resource_pool_names
        Resource pool names

    datacenter_name
        Name of the datacenter where the resource pool is available

    get_all_resource_pools
        Boolean

    return
        Resourcepool managed object reference

    Raises a VMwareObjectRetrievalError if no matching resource pool is found.
    '''
    properties = ['name']
    if not resource_pool_names:
        resource_pool_names = []
    # Search under the datacenter when one is given, otherwise under the
    # inventory root folder.
    if datacenter_name:
        container_ref = get_datacenter(service_instance, datacenter_name)
    else:
        container_ref = get_root_folder(service_instance)

    resource_pools = get_mors_with_properties(service_instance,
                                              vim.ResourcePool,
                                              container_ref=container_ref,
                                              property_list=properties)

    selected_pools = [pool['object'] for pool in resource_pools
                      if get_all_resource_pools or
                      pool['name'] in resource_pool_names]
    if not selected_pools:
        # Report the names that were requested; the previous message
        # formatted the (always empty) result list instead of the inputs.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The resource pools with properties '
            'names={} get_all={} could not be found'.format(
                resource_pool_names, get_all_resource_pools))
    return selected_pools
def list_resourcepools(service_instance):
    '''
    Returns a list of resource pools associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain resource pools.
    '''
    pool_type = vim.ResourcePool
    return list_objects(service_instance, pool_type)
def list_networks(service_instance):
    '''
    Returns a list of networks associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain networks.
    '''
    network_type = vim.Network
    return list_objects(service_instance, network_type)
def list_vms(service_instance):
    '''
    Returns a list of VMs associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain VMs.
    '''
    vm_type = vim.VirtualMachine
    return list_objects(service_instance, vm_type)
def list_folders(service_instance):
    '''
    Returns a list of folders associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain folders.
    '''
    folder_type = vim.Folder
    return list_objects(service_instance, folder_type)
def list_dvs(service_instance):
    '''
    Returns a list of distributed virtual switches associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    dvs_type = vim.DistributedVirtualSwitch
    return list_objects(service_instance, dvs_type)
def list_vapps(service_instance):
    '''
    Returns a list of vApps associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain vApps.
    '''
    vapp_type = vim.VirtualApp
    return list_objects(service_instance, vapp_type)
def list_portgroups(service_instance):
    '''
    Returns a list of distributed virtual portgroups associated with a given service instance.

    service_instance
        The Service Instance Object from which to obtain distributed virtual switches.
    '''
    portgroup_type = vim.dvs.DistributedVirtualPortgroup
    return list_objects(service_instance, portgroup_type)
def wait_for_task(task, instance_name, task_type, sleep_seconds=1, log_level='debug'):
    '''
    Waits for a task to be completed.

    task
        The task to wait for.

    instance_name
        The name of the ESXi host, vCenter Server, or Virtual Machine that
        the task is being run on.

    task_type
        The type of task being performed. Useful information for debugging purposes.

    sleep_seconds
        The number of seconds to wait before querying the task again.
        Defaults to ``1`` second.

    log_level
        The level at which to log task information. Default is ``debug``,
        but ``info`` is also supported.
    '''
    time_counter = 0
    start_time = time.time()
    log.trace('task = %s, task_type = %s', task, task.__class__.__name__)
    try:
        task_info = task.info
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.FileNotFound as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Poll until the task leaves the running/queued states, logging a
    # progress message every sleep_seconds seconds.
    while task_info.state == 'running' or task_info.state == 'queued':
        if time_counter % sleep_seconds == 0:
            msg = '[ {0} ] Waiting for {1} task to finish [{2} s]'.format(
                instance_name, task_type, time_counter)
            if log_level == 'info':
                log.info(msg)
            else:
                log.debug(msg)
        # Sleep to the next whole-second boundary relative to start_time so
        # time_counter tracks elapsed wall-clock seconds.
        time.sleep(1.0 - ((time.time() - start_time) % 1.0))
        time_counter += 1
        try:
            task_info = task.info
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.RuntimeFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareRuntimeError(exc.msg)
    if task_info.state == 'success':
        msg = '[ {0} ] Successfully completed {1} task in {2} seconds'.format(
            instance_name, task_type, time_counter)
        if log_level == 'info':
            log.info(msg)
        else:
            log.debug(msg)
        # task is in a successful state
        return task_info.result
    else:
        # task is in an error state; re-raise the task's fault so it can be
        # translated into the matching salt exception below.
        try:
            raise task_info.error
        except vim.fault.NoPermission as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(
                'Not enough permissions. Required privilege: '
                '{}'.format(exc.privilegeId))
        except vim.fault.FileNotFound as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareFileNotFoundError(exc.msg)
        except vim.fault.VimFault as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareApiError(exc.msg)
        except vmodl.fault.SystemError as exc:
            log.exception(exc)
            raise salt.exceptions.VMwareSystemError(exc.msg)
        except vmodl.fault.InvalidArgument as exc:
            log.exception(exc)
            exc_message = exc.msg
            # Append the first fault message detail when one is available.
            if exc.faultMessage:
                exc_message = '{0} ({1})'.format(exc_message,
                                                 exc.faultMessage[0].message)
            raise salt.exceptions.VMwareApiError(exc_message)
def get_vm_by_property(service_instance, name, datacenter=None, vm_properties=None,
                       traversal_spec=None, parent_ref=None):
    '''
    Get virtual machine properties based on the traversal specs and properties list,
    returns Virtual Machine object with properties.

    service_instance
        Service instance object to access vCenter

    name
        Name of the virtual machine.

    datacenter
        Datacenter name

    vm_properties
        List of vm properties.

    traversal_spec
        Traversal Spec object(s) for searching.

    parent_ref
        Container Reference object for searching under a given object.
    '''
    # A datacenter name narrows the search scope unless an explicit parent
    # container was already given.
    if datacenter and not parent_ref:
        parent_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if not vm_properties:
        # Default property set covering hardware, storage, guest and runtime
        # information.
        vm_properties = ['name',
                         'config.hardware.device',
                         'summary.storage.committed',
                         'summary.storage.uncommitted',
                         'summary.storage.unshared',
                         'layoutEx.file',
                         'config.guestFullName',
                         'config.guestId',
                         'guest.net',
                         'config.hardware.memoryMB',
                         'config.hardware.numCPU',
                         'config.files.vmPathName',
                         'summary.runtime.powerState',
                         'guest.toolsStatus']
    vm_list = salt.utils.vmware.get_mors_with_properties(service_instance,
                                                         vim.VirtualMachine,
                                                         vm_properties,
                                                         container_ref=parent_ref,
                                                         traversal_spec=traversal_spec)
    # Exactly one VM must match the requested name.
    vm_formatted = [vm for vm in vm_list if vm['name'] == name]
    if not vm_formatted:
        raise salt.exceptions.VMwareObjectRetrievalError('The virtual machine was not found.')
    elif len(vm_formatted) > 1:
        raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
            'Multiple virtual machines were found with the'
            'same name, please specify a container.']))
    return vm_formatted[0]
def get_folder(service_instance, datacenter, placement, base_vm_name=None):
    '''
    Returns a Folder Object

    service_instance
        Service instance object

    datacenter
        Name of the datacenter

    placement
        Placement dictionary

    base_vm_name
        Existing virtual machine name (for cloning)

    Raises a VMwareObjectRetrievalError when no folder can be determined from
    the given arguments.
    '''
    log.trace('Retrieving folder information')
    if base_vm_name:
        # Clone case: place the new VM next to the base VM, i.e. in the base
        # VM's parent folder.
        vm_object = get_vm_by_property(service_instance, base_vm_name,
                                       vm_properties=['name'])
        vm_props = salt.utils.vmware.get_properties_of_managed_object(
            vm_object, properties=['parent'])
        if 'parent' in vm_props:
            folder_object = vm_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The virtual machine parent',
                'object is not defined']))
    elif 'folder' in placement:
        folder_objects = salt.utils.vmware.get_folders(
            service_instance, [placement['folder']], datacenter)
        if len(folder_objects) > 1:
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified folder {0}'.format(placement['folder'])]))
        folder_object = folder_objects[0]
    elif datacenter:
        # Fall back to the datacenter's default vm folder.
        datacenter_object = salt.utils.vmware.get_datacenter(service_instance,
                                                             datacenter)
        dc_props = salt.utils.vmware.get_properties_of_managed_object(
            datacenter_object, properties=['vmFolder'])
        if 'vmFolder' in dc_props:
            folder_object = dc_props['vmFolder']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(
                'The datacenter vm folder object is not defined')
    else:
        # Previously this path fell through to the return statement and
        # crashed with an UnboundLocalError; fail with an explicit,
        # catchable error instead.
        raise salt.exceptions.VMwareObjectRetrievalError(
            'The folder object could not be determined; no base virtual '
            'machine, placement folder or datacenter was specified')
    return folder_object
def get_placement(service_instance, datacenter, placement=None):
    '''
    To create a virtual machine a resource pool needs to be supplied, we would like to use the strictest as possible.

    datacenter
        Name of the datacenter

    placement
        Dictionary with the placement info, cluster, host resource pool name

    return
        Resource pool, cluster and host object if any applies
    '''
    log.trace('Retrieving placement information')
    resourcepool_object, placement_object = None, None
    if not placement:
        # Guard: with the default placement=None the membership tests below
        # used to raise a TypeError; raise the documented error instead.
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    if 'host' in placement:
        host_objects = get_hosts(service_instance, datacenter_name=datacenter,
                                 host_names=[placement['host']])
        if not host_objects:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The specified host',
                '{0} cannot be found.'.format(placement['host'])]))
        try:
            host_props = \
                get_properties_of_managed_object(host_objects[0],
                                                 properties=['resourcePool'])
            resourcepool_object = host_props['resourcePool']
        except vmodl.query.InvalidProperty:
            # The host does not expose a resourcePool property directly;
            # traverse to the parent compute resource's pool.
            traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
                path='parent',
                skip=True,
                type=vim.HostSystem,
                selectSet=[vmodl.query.PropertyCollector.TraversalSpec(
                    path='resourcePool',
                    skip=False,
                    type=vim.ClusterComputeResource)])
            resourcepools = get_mors_with_properties(service_instance,
                                                     vim.ResourcePool,
                                                     container_ref=host_objects[0],
                                                     property_list=['name'],
                                                     traversal_spec=traversal_spec)
            if resourcepools:
                resourcepool_object = resourcepools[0]['object']
            else:
                raise salt.exceptions.VMwareObjectRetrievalError(
                    'The resource pool of host {0} cannot be found.'.format(placement['host']))
        placement_object = host_objects[0]
    elif 'resourcepool' in placement:
        resourcepool_objects = get_resource_pools(service_instance,
                                                  [placement['resourcepool']],
                                                  datacenter_name=datacenter)
        if len(resourcepool_objects) > 1:
            # The previous message referenced placement['host'], which may
            # not exist in this branch (KeyError) and named the wrong object.
            raise salt.exceptions.VMwareMultipleObjectsError(' '.join([
                'Multiple instances are available of the',
                'specified resource pool {}.'.format(
                    placement['resourcepool'])]))
        resourcepool_object = resourcepool_objects[0]
        res_props = get_properties_of_managed_object(resourcepool_object,
                                                     properties=['parent'])
        if 'parent' in res_props:
            placement_object = res_props['parent']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The resource pool\'s parent',
                'object is not defined']))
    elif 'cluster' in placement:
        datacenter_object = get_datacenter(service_instance, datacenter)
        cluster_object = get_cluster(datacenter_object, placement['cluster'])
        clus_props = get_properties_of_managed_object(cluster_object,
                                                      properties=['resourcePool'])
        if 'resourcePool' in clus_props:
            resourcepool_object = clus_props['resourcePool']
        else:
            raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
                'The cluster\'s resource pool',
                'object is not defined']))
        placement_object = cluster_object
    else:
        # We are checking the schema for this object, this exception should never be raised
        raise salt.exceptions.VMwareObjectRetrievalError(' '.join([
            'Placement is not defined.']))
    return (resourcepool_object, placement_object)
def convert_to_kb(unit, size):
    '''
    Converts the given size to KB based on the unit, returns a dict with the
    converted size under the 'size' key and 'KB' under the 'unit' key.

    unit
        Unit of the size eg. GB; Note: to VMware a GB is the same as GiB = 1024MiB

    size
        Number which represents the size

    Raises an ArgumentValueError when the unit is not kb, mb or gb
    (case-insensitive).
    '''
    # Dispatch table replaces the previous if/elif chain; vCenter needs the
    # size as an integral number of kibibytes (hence the int()).
    multipliers = {'gb': 1024 * 1024, 'mb': 1024, 'kb': 1}
    try:
        multiplier = multipliers[unit.lower()]
    except KeyError:
        raise salt.exceptions.ArgumentValueError('The unit is not specified')
    return {'size': int(size * multiplier), 'unit': 'KB'}
def power_cycle_vm(virtual_machine, action='on'):
    '''
    Powers on/off a virtual machine specified by it's name.

    virtual_machine
        vim.VirtualMachine object to power on/off virtual machine

    action
        Operation option to power on/off the machine
    '''
    # Validate the action and pick the API call up front so an unsupported
    # action fails fast, before any vCenter interaction.
    if action == 'on':
        power_op, task_name = virtual_machine.PowerOn, 'power on'
    elif action == 'off':
        power_op, task_name = virtual_machine.PowerOff, 'power off'
    else:
        raise salt.exceptions.ArgumentValueError('The given action is not supported')
    # Single fault-translation block replaces the two identical copies that
    # previously guarded PowerOn and PowerOff separately.
    try:
        task = power_op()
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        wait_for_task(task, get_managed_object_name(virtual_machine), task_name)
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwarePowerOnError(' '.join([
            'An error occurred during power',
            'operation, a file was not found: {0}'.format(exc)]))
    return virtual_machine
def create_vm(vm_name, vm_config_spec, folder_object, resourcepool_object, host_object=None):
    '''
    Creates virtual machine from config spec

    vm_name
        Virtual machine name to be created

    vm_config_spec
        Virtual Machine Config Spec object

    folder_object
        vm Folder managed object reference

    resourcepool_object
        Resource pool object where the machine will be created

    host_object
        Host object where the machine will be placed (optional)

    return
        Virtual Machine managed object reference
    '''
    # Build the call arguments once; the host is only passed when a valid
    # vim.HostSystem was supplied.
    create_kwargs = {'pool': resourcepool_object}
    if host_object and isinstance(host_object, vim.HostSystem):
        create_kwargs['host'] = host_object
    try:
        task = folder_object.CreateVM_Task(vm_config_spec, **create_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    return wait_for_task(task, vm_name, 'CreateVM Task', 10, 'info')
def register_vm(datacenter, name, vmx_path, resourcepool_object, host_object=None):
    '''
    Registers a virtual machine to the inventory with the given vmx file, on success
    it returns the vim.VirtualMachine managed object reference

    datacenter
        Datacenter object of the virtual machine, vim.Datacenter object

    name
        Name of the virtual machine

    vmx_path:
        Full path to the vmx file, datastore name should be included

    resourcepool
        Placement resource pool of the virtual machine, vim.ResourcePool object

    host
        Placement host of the virtual machine, vim.HostSystem object
    '''
    # Assemble the call arguments once; the host is only passed when given.
    register_kwargs = {'path': vmx_path,
                       'name': name,
                       'asTemplate': False,
                       'pool': resourcepool_object}
    if host_object:
        register_kwargs['host'] = host_object
    try:
        task = datacenter.vmFolder.RegisterVM_Task(**register_kwargs)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    try:
        vm_ref = wait_for_task(task, name, 'RegisterVM Task')
    except salt.exceptions.VMwareFileNotFoundError as exc:
        raise salt.exceptions.VMwareVmRegisterError(
            'An error occurred during registration operation, the '
            'configuration file was not found: {0}'.format(exc))
    return vm_ref
def update_vm(vm_ref, vm_config_spec):
    '''
    Updates the virtual machine configuration with the given object

    vm_ref
        Virtual machine managed object reference

    vm_config_spec
        Virtual machine config spec object to update
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Updating vm \'%s\'', vm_name)
    try:
        reconfig_task = vm_ref.ReconfigVM_Task(vm_config_spec)
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Return the task result once the reconfiguration completes.
    return wait_for_task(reconfig_task, vm_name, 'ReconfigureVM Task')
def delete_vm(vm_ref):
    '''
    Destroys the virtual machine

    Returns None; raises a Salt VMware exception if the destroy call or the
    resulting task fails.

    vm_ref
        Managed object reference of a virtual machine object
    '''
    vm_name = get_managed_object_name(vm_ref)
    log.trace('Destroying vm \'%s\'', vm_name)
    try:
        task = vm_ref.Destroy_Task()
    # Translate pyVmomi faults into Salt exception types; the specific
    # NoPermission fault is caught before the generic VimFault.
    except vim.fault.NoPermission as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(
            'Not enough permissions. Required privilege: '
            '{}'.format(exc.privilegeId))
    except vim.fault.VimFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareApiError(exc.msg)
    except vmodl.RuntimeFault as exc:
        log.exception(exc)
        raise salt.exceptions.VMwareRuntimeError(exc.msg)
    # Block until the destroy task completes; wait_for_task raises on
    # task-level errors.
    wait_for_task(task, vm_name, 'Destroy Task')
|
saltstack/salt
|
salt/auth/pki.py
|
auth
|
python
|
def auth(username, password, **kwargs):
    '''
    Returns True if the given user cert (password is the cert contents)
    was issued by the CA and if cert's Common Name is equal to username.

    Returns False otherwise.

    ``username``: we need it to run the auth function from CLI/API;
                  it should be in master config auth/acl
    ``password``: contents of user certificate (pem-encoded user public key);
                  why "password"? For CLI, it's the only available name

    Configure the CA cert in the master config file:

    .. code-block:: yaml

        external_auth:
          pki:
            ca_file: /etc/pki/tls/ca_certs/trusted-ca.crt
            your_user:
              - .*
    '''
    pem = password
    cacert_file = __salt__['config.get']('external_auth:pki:ca_file')

    log.debug('Attempting to authenticate via pki.')
    log.debug('Using CA file: %s', cacert_file)
    log.debug('Certificate contents: %s', pem)

    if HAS_M2:
        cert = X509.load_cert_string(pem, X509.FORMAT_PEM)
        cacert = X509.load_cert(cacert_file, X509.FORMAT_PEM)
        # The cert must both verify against the CA key AND belong to the
        # authenticating user; without the CN check any CA-issued cert
        # would authenticate as any user, contradicting the contract above.
        if cert.verify(cacert.get_pubkey()) and \
                cert.get_subject().CN == username:
            log.info('Successfully authenticated certificate: %s', pem)
            return True
        else:
            log.info('Failed to authenticate certificate: %s', pem)
            return False

    c = OpenSSL.crypto
    cert = c.load_certificate(c.FILETYPE_PEM, pem)
    with salt.utils.files.fopen(cacert_file) as f:
        cacert = c.load_certificate(c.FILETYPE_PEM, f.read())

    # Get the signing algorithm
    algo = cert.get_signature_algorithm()
    # Get the ASN1 format of the certificate
    cert_asn1 = c.dump_certificate(c.FILETYPE_ASN1, cert)

    # Decode the certificate
    der = asn1.DerSequence()
    der.decode(cert_asn1)

    # The certificate has three parts:
    # - certificate
    # - signature algorithm
    # - signature
    # http://usefulfor.com/nothing/2009/06/10/x509-certificate-basics/
    der_cert = der[0]
    #der_algo = der[1]
    der_sig = der[2]

    # The signature is a BIT STRING (Type 3)
    # Decode that as well
    der_sig_in = asn1.DerObject()
    der_sig_in.decode(der_sig)

    # Get the payload
    sig0 = der_sig_in.payload

    # Do the following to see a validation error for tests
    # der_cert=der_cert[:20]+'1'+der_cert[21:]

    # First byte is the number of unused bits. This should be 0
    # http://msdn.microsoft.com/en-us/library/windows/desktop/bb540792(v=vs.85).aspx
    # Compare a one-byte slice rather than indexing: on Python 3 the payload
    # is bytes, so sig0[0] is an int and comparing it to a str would always
    # be unequal (raising here for every certificate). The slice yields
    # b'\x00' on both Python 2 and Python 3.
    if sig0[0:1] != b'\x00':
        raise Exception('Number of unused bits is strange')
    # Now get the signature itself
    sig = sig0[1:]

    # And verify the certificate
    try:
        c.verify(cacert, sig, der_cert, algo)
    except OpenSSL.crypto.Error:
        log.info('Failed to authenticate certificate: %s', pem)
        return False
    # The CN must match the authenticating user. An explicit check replaces
    # the previous ``assert``, which is silently stripped when Python runs
    # with ``-O`` and would have disabled this authorization step.
    if dict(cert.get_subject().get_components())['CN'] != username:
        log.info('Failed to authenticate certificate: %s', pem)
        return False
    log.info('Successfully authenticated certificate: %s', pem)
    return True
|
Returns True if the given user cert (password is the cert contents)
was issued by the CA and if cert's Common Name is equal to username.
Returns False otherwise.
``username``: we need it to run the auth function from CLI/API;
it should be in master config auth/acl
``password``: contents of user certificate (pem-encoded user public key);
why "password"? For CLI, it's the only available name
Configure the CA cert in the master config file:
.. code-block:: yaml
external_auth:
pki:
ca_file: /etc/pki/tls/ca_certs/trusted-ca.crt
your_user:
- .*
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/auth/pki.py#L55-L145
|
[
"def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n"
] |
# -*- coding: utf-8 -*-
# Majority of code shamelessly stolen from
# http://www.v13.gr/blog/?p=303
'''
Authenticate via a PKI certificate.
.. note::
This module is Experimental and should be used with caution
Provides an authenticate function that will allow the caller to authenticate
a user via their public cert against a pre-defined Certificate Authority.
TODO: Add a 'ca_dir' option to configure a directory of CA files, a la Apache.
:depends: - pyOpenSSL module
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import third party libs
# pylint: disable=import-error
try:
try:
from M2Crypto import X509
HAS_M2 = True
except ImportError:
HAS_M2 = False
try:
from Cryptodome.Util import asn1
except ImportError:
from Crypto.Util import asn1
import OpenSSL
HAS_DEPS = True
except ImportError:
HAS_DEPS = False
# pylint: enable=import-error
# Import salt libs
import salt.utils.files
log = logging.getLogger(__name__)
def __virtual__():
'''
Requires newer pycrypto and pyOpenSSL
'''
if HAS_DEPS:
return True
return False
|
saltstack/salt
|
salt/utils/dictdiffer.py
|
RecursiveDictDiffer._get_diffs
|
python
|
def _get_diffs(cls, dict1, dict2, ignore_missing_keys):
'''
Returns a dict with the differences between dict1 and dict2
Notes:
Keys that only exist in dict2 are not included in the diff if
ignore_missing_keys is True, otherwise they are
Simple compares are done on lists
'''
ret_dict = {}
for p in dict1.keys():
if p not in dict2:
ret_dict.update({p: {'new': dict1[p], 'old': cls.NONE_VALUE}})
elif dict1[p] != dict2[p]:
if isinstance(dict1[p], dict) and isinstance(dict2[p], dict):
sub_diff_dict = cls._get_diffs(dict1[p], dict2[p],
ignore_missing_keys)
if sub_diff_dict:
ret_dict.update({p: sub_diff_dict})
else:
ret_dict.update({p: {'new': dict1[p], 'old': dict2[p]}})
if not ignore_missing_keys:
for p in dict2.keys():
if p not in dict1.keys():
ret_dict.update({p: {'new': cls.NONE_VALUE,
'old': dict2[p]}})
return ret_dict
|
Returns a dict with the differences between dict1 and dict2
Notes:
Keys that only exist in dict2 are not included in the diff if
ignore_missing_keys is True, otherwise they are
Simple compares are done on lists
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dictdiffer.py#L166-L192
|
[
"def _get_diffs(cls, dict1, dict2, ignore_missing_keys):\n '''\n Returns a dict with the differences between dict1 and dict2\n\n Notes:\n Keys that only exist in dict2 are not included in the diff if\n ignore_missing_keys is True, otherwise they are\n Simple compares are done on lists\n '''\n ret_dict = {}\n for p in dict1.keys():\n if p not in dict2:\n ret_dict.update({p: {'new': dict1[p], 'old': cls.NONE_VALUE}})\n elif dict1[p] != dict2[p]:\n if isinstance(dict1[p], dict) and isinstance(dict2[p], dict):\n sub_diff_dict = cls._get_diffs(dict1[p], dict2[p],\n ignore_missing_keys)\n if sub_diff_dict:\n ret_dict.update({p: sub_diff_dict})\n else:\n ret_dict.update({p: {'new': dict1[p], 'old': dict2[p]}})\n if not ignore_missing_keys:\n for p in dict2.keys():\n if p not in dict1.keys():\n ret_dict.update({p: {'new': cls.NONE_VALUE,\n 'old': dict2[p]}})\n return ret_dict\n"
] |
class RecursiveDictDiffer(DictDiffer):
'''
Calculates a recursive diff between the current_dict and the past_dict
creating a diff in the format
{'new': new_value, 'old': old_value}
It recursively searches differences in common keys whose values are
dictionaries creating a diff dict in the format
{'common_key' : {'new': new_value, 'old': old_value}
The class overrides all DictDiffer methods, returning lists of keys and
subkeys using the . notation (i.e 'common_key1.common_key2.changed_key')
The class provides access to:
(1) the added, removed, changes keys and subkeys (using the . notation)
``added``, ``removed``, ``changed`` methods
(2) the diffs in the format above (diff property)
``diffs`` property
(3) a dict with the new changed values only (new_values property)
``new_values`` property
(4) a dict with the old changed values only (old_values property)
``old_values`` property
(5) a string representation of the changes in the format:
``changes_str`` property
Note:
The <_null_> value is a reserved value
.. code-block:: text
common_key1:
common_key2:
changed_key1 from '<old_str>' to '<new_str>'
changed_key2 from '[<old_elem1>, ..]' to '[<new_elem1>, ..]'
common_key3:
changed_key3 from <old_int> to <new_int>
'''
NONE_VALUE = '<_null_>'
def __init__(self, past_dict, current_dict, ignore_missing_keys):
'''
past_dict
Past dictionary.
current_dict
Current dictionary.
ignore_missing_keys
Flag specifying whether to ignore keys that no longer exist in the
current_dict, but exist in the past_dict. If true, the diff will
not contain the missing keys.
'''
super(RecursiveDictDiffer, self).__init__(current_dict, past_dict)
self._diffs = \
self._get_diffs(self.current_dict, self.past_dict,
ignore_missing_keys)
# Ignores unset values when assessing the changes
self.ignore_unset_values = True
@classmethod
@classmethod
def _get_values(cls, diff_dict, type='new'):
'''
Returns a dictionary with the 'new' values in a diff dict.
type
Which values to return, 'new' or 'old'
'''
ret_dict = {}
for p in diff_dict.keys():
if type in diff_dict[p].keys():
ret_dict.update({p: diff_dict[p][type]})
else:
ret_dict.update(
{p: cls._get_values(diff_dict[p], type=type)})
return ret_dict
@classmethod
def _get_changes(cls, diff_dict):
'''
Returns a list of string message with the differences in a diff dict.
Each inner difference is tabulated two space deeper
'''
changes_strings = []
for p in sorted(diff_dict.keys()):
if sorted(diff_dict[p].keys()) == ['new', 'old']:
# Some string formatting
old_value = diff_dict[p]['old']
if diff_dict[p]['old'] == cls.NONE_VALUE:
old_value = 'nothing'
elif isinstance(diff_dict[p]['old'], six.string_types):
old_value = '\'{0}\''.format(diff_dict[p]['old'])
elif isinstance(diff_dict[p]['old'], list):
old_value = '\'{0}\''.format(
', '.join(diff_dict[p]['old']))
new_value = diff_dict[p]['new']
if diff_dict[p]['new'] == cls.NONE_VALUE:
new_value = 'nothing'
elif isinstance(diff_dict[p]['new'], six.string_types):
new_value = '\'{0}\''.format(diff_dict[p]['new'])
elif isinstance(diff_dict[p]['new'], list):
new_value = '\'{0}\''.format(', '.join(diff_dict[p]['new']))
changes_strings.append('{0} from {1} to {2}'.format(
p, old_value, new_value))
else:
sub_changes = cls._get_changes(diff_dict[p])
if sub_changes:
changes_strings.append('{0}:'.format(p))
changes_strings.extend([' {0}'.format(c)
for c in sub_changes])
return changes_strings
def added(self):
'''
Returns all keys that have been added.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _added(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_added(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
elif diffs[key]['old'] == self.NONE_VALUE:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_added(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
return keys
return sorted(_added(self._diffs, prefix=''))
def removed(self):
'''
Returns all keys that have been removed.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _removed(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_removed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
elif diffs[key]['new'] == self.NONE_VALUE:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key]['new'], dict):
keys.extend(
_removed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_removed(self._diffs, prefix=''))
def changed(self):
'''
Returns all keys that have been changed.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _changed(diffs, prefix):
keys = []
for key in diffs.keys():
if not isinstance(diffs[key], dict):
continue
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
continue
if self.ignore_unset_values:
if 'old' in diffs[key] and 'new' in diffs[key] and \
diffs[key]['old'] != self.NONE_VALUE and \
diffs[key]['new'] != self.NONE_VALUE:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_changed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key], dict):
keys.extend(
_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
else:
if 'old' in diffs[key] and 'new' in diffs[key]:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_changed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key], dict):
keys.extend(
_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_changed(self._diffs, prefix=''))
def unchanged(self):
'''
Returns all keys that have been unchanged.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _unchanged(current_dict, diffs, prefix):
keys = []
for key in current_dict.keys():
if key not in diffs:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(current_dict[key], dict):
if 'new' in diffs[key]:
# There is a diff
continue
else:
keys.extend(
_unchanged(current_dict[key],
diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_unchanged(self.current_dict, self._diffs, prefix=''))
@property
def diffs(self):
'''Returns a dict with the recursive diffs current_dict - past_dict'''
return self._diffs
@property
def new_values(self):
'''Returns a dictionary with the new values'''
return self._get_values(self._diffs, type='new')
@property
def old_values(self):
'''Returns a dictionary with the old values'''
return self._get_values(self._diffs, type='old')
@property
def changes_str(self):
'''Returns a string describing the changes'''
return '\n'.join(self._get_changes(self._diffs))
|
saltstack/salt
|
salt/utils/dictdiffer.py
|
RecursiveDictDiffer._get_values
|
python
|
def _get_values(cls, diff_dict, type='new'):
'''
Returns a dictionaries with the 'new' values in a diff dict.
type
Which values to return, 'new' or 'old'
'''
ret_dict = {}
for p in diff_dict.keys():
if type in diff_dict[p].keys():
ret_dict.update({p: diff_dict[p][type]})
else:
ret_dict.update(
{p: cls._get_values(diff_dict[p], type=type)})
return ret_dict
|
Returns a dictionaries with the 'new' values in a diff dict.
type
Which values to return, 'new' or 'old'
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dictdiffer.py#L195-L209
|
[
"def _get_values(cls, diff_dict, type='new'):\n '''\n Returns a dictionaries with the 'new' values in a diff dict.\n\n type\n Which values to return, 'new' or 'old'\n '''\n ret_dict = {}\n for p in diff_dict.keys():\n if type in diff_dict[p].keys():\n ret_dict.update({p: diff_dict[p][type]})\n else:\n ret_dict.update(\n {p: cls._get_values(diff_dict[p], type=type)})\n return ret_dict\n"
] |
class RecursiveDictDiffer(DictDiffer):
'''
Calculates a recursive diff between the current_dict and the past_dict
creating a diff in the format
{'new': new_value, 'old': old_value}
It recursively searches differences in common keys whose values are
dictionaries creating a diff dict in the format
{'common_key' : {'new': new_value, 'old': old_value}
The class overrides all DictDiffer methods, returning lists of keys and
subkeys using the . notation (i.e 'common_key1.common_key2.changed_key')
The class provides access to:
(1) the added, removed, changes keys and subkeys (using the . notation)
``added``, ``removed``, ``changed`` methods
(2) the diffs in the format above (diff property)
``diffs`` property
(3) a dict with the new changed values only (new_values property)
``new_values`` property
(4) a dict with the old changed values only (old_values property)
``old_values`` property
(5) a string representation of the changes in the format:
``changes_str`` property
Note:
The <_null_> value is a reserved value
.. code-block:: text
common_key1:
common_key2:
changed_key1 from '<old_str>' to '<new_str>'
changed_key2 from '[<old_elem1>, ..]' to '[<new_elem1>, ..]'
common_key3:
changed_key3 from <old_int> to <new_int>
'''
NONE_VALUE = '<_null_>'
def __init__(self, past_dict, current_dict, ignore_missing_keys):
'''
past_dict
Past dictionary.
current_dict
Current dictionary.
ignore_missing_keys
Flag specifying whether to ignore keys that no longer exist in the
current_dict, but exist in the past_dict. If true, the diff will
not contain the missing keys.
'''
super(RecursiveDictDiffer, self).__init__(current_dict, past_dict)
self._diffs = \
self._get_diffs(self.current_dict, self.past_dict,
ignore_missing_keys)
# Ignores unset values when assessing the changes
self.ignore_unset_values = True
@classmethod
def _get_diffs(cls, dict1, dict2, ignore_missing_keys):
'''
Returns a dict with the differences between dict1 and dict2
Notes:
Keys that only exist in dict2 are not included in the diff if
ignore_missing_keys is True, otherwise they are
Simple compares are done on lists
'''
ret_dict = {}
for p in dict1.keys():
if p not in dict2:
ret_dict.update({p: {'new': dict1[p], 'old': cls.NONE_VALUE}})
elif dict1[p] != dict2[p]:
if isinstance(dict1[p], dict) and isinstance(dict2[p], dict):
sub_diff_dict = cls._get_diffs(dict1[p], dict2[p],
ignore_missing_keys)
if sub_diff_dict:
ret_dict.update({p: sub_diff_dict})
else:
ret_dict.update({p: {'new': dict1[p], 'old': dict2[p]}})
if not ignore_missing_keys:
for p in dict2.keys():
if p not in dict1.keys():
ret_dict.update({p: {'new': cls.NONE_VALUE,
'old': dict2[p]}})
return ret_dict
@classmethod
@classmethod
def _get_changes(cls, diff_dict):
'''
Returns a list of string message with the differences in a diff dict.
Each inner difference is tabulated two space deeper
'''
changes_strings = []
for p in sorted(diff_dict.keys()):
if sorted(diff_dict[p].keys()) == ['new', 'old']:
# Some string formatting
old_value = diff_dict[p]['old']
if diff_dict[p]['old'] == cls.NONE_VALUE:
old_value = 'nothing'
elif isinstance(diff_dict[p]['old'], six.string_types):
old_value = '\'{0}\''.format(diff_dict[p]['old'])
elif isinstance(diff_dict[p]['old'], list):
old_value = '\'{0}\''.format(
', '.join(diff_dict[p]['old']))
new_value = diff_dict[p]['new']
if diff_dict[p]['new'] == cls.NONE_VALUE:
new_value = 'nothing'
elif isinstance(diff_dict[p]['new'], six.string_types):
new_value = '\'{0}\''.format(diff_dict[p]['new'])
elif isinstance(diff_dict[p]['new'], list):
new_value = '\'{0}\''.format(', '.join(diff_dict[p]['new']))
changes_strings.append('{0} from {1} to {2}'.format(
p, old_value, new_value))
else:
sub_changes = cls._get_changes(diff_dict[p])
if sub_changes:
changes_strings.append('{0}:'.format(p))
changes_strings.extend([' {0}'.format(c)
for c in sub_changes])
return changes_strings
def added(self):
'''
Returns all keys that have been added.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _added(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_added(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
elif diffs[key]['old'] == self.NONE_VALUE:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_added(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
return keys
return sorted(_added(self._diffs, prefix=''))
def removed(self):
'''
Returns all keys that have been removed.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _removed(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_removed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
elif diffs[key]['new'] == self.NONE_VALUE:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key]['new'], dict):
keys.extend(
_removed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_removed(self._diffs, prefix=''))
def changed(self):
'''
Returns all keys that have been changed.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _changed(diffs, prefix):
keys = []
for key in diffs.keys():
if not isinstance(diffs[key], dict):
continue
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
continue
if self.ignore_unset_values:
if 'old' in diffs[key] and 'new' in diffs[key] and \
diffs[key]['old'] != self.NONE_VALUE and \
diffs[key]['new'] != self.NONE_VALUE:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_changed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key], dict):
keys.extend(
_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
else:
if 'old' in diffs[key] and 'new' in diffs[key]:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_changed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key], dict):
keys.extend(
_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_changed(self._diffs, prefix=''))
def unchanged(self):
'''
Returns all keys that have been unchanged.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _unchanged(current_dict, diffs, prefix):
keys = []
for key in current_dict.keys():
if key not in diffs:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(current_dict[key], dict):
if 'new' in diffs[key]:
# There is a diff
continue
else:
keys.extend(
_unchanged(current_dict[key],
diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_unchanged(self.current_dict, self._diffs, prefix=''))
@property
def diffs(self):
'''Returns a dict with the recursive diffs current_dict - past_dict'''
return self._diffs
@property
def new_values(self):
'''Returns a dictionary with the new values'''
return self._get_values(self._diffs, type='new')
@property
def old_values(self):
'''Returns a dictionary with the old values'''
return self._get_values(self._diffs, type='old')
@property
def changes_str(self):
'''Returns a string describing the changes'''
return '\n'.join(self._get_changes(self._diffs))
|
saltstack/salt
|
salt/utils/dictdiffer.py
|
RecursiveDictDiffer._get_changes
|
python
|
def _get_changes(cls, diff_dict):
    '''
    Returns a list of string message with the differences in a diff dict.

    Each inner difference is tabulated two space deeper
    '''
    def _render(value):
        # Human-friendly rendering of one side of a change
        if value == cls.NONE_VALUE:
            return 'nothing'
        if isinstance(value, six.string_types):
            return '\'{0}\''.format(value)
        if isinstance(value, list):
            return '\'{0}\''.format(', '.join(value))
        return value

    changes_strings = []
    for key in sorted(diff_dict.keys()):
        entry = diff_dict[key]
        if sorted(entry.keys()) == ['new', 'old']:
            # Leaf change: render both sides on one line
            changes_strings.append('{0} from {1} to {2}'.format(
                key, _render(entry['old']), _render(entry['new'])))
        else:
            # Nested diff: indent its rendered lines two spaces deeper
            nested = cls._get_changes(entry)
            if nested:
                changes_strings.append('{0}:'.format(key))
                changes_strings.extend(' {0}'.format(line)
                                       for line in nested)
    return changes_strings
|
Returns a list of string message with the differences in a diff dict.
Each inner difference is tabulated two space deeper
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dictdiffer.py#L212-L245
|
[
"def _get_changes(cls, diff_dict):\n '''\n Returns a list of string message with the differences in a diff dict.\n\n Each inner difference is tabulated two space deeper\n '''\n changes_strings = []\n for p in sorted(diff_dict.keys()):\n if sorted(diff_dict[p].keys()) == ['new', 'old']:\n # Some string formatting\n old_value = diff_dict[p]['old']\n if diff_dict[p]['old'] == cls.NONE_VALUE:\n old_value = 'nothing'\n elif isinstance(diff_dict[p]['old'], six.string_types):\n old_value = '\\'{0}\\''.format(diff_dict[p]['old'])\n elif isinstance(diff_dict[p]['old'], list):\n old_value = '\\'{0}\\''.format(\n ', '.join(diff_dict[p]['old']))\n new_value = diff_dict[p]['new']\n if diff_dict[p]['new'] == cls.NONE_VALUE:\n new_value = 'nothing'\n elif isinstance(diff_dict[p]['new'], six.string_types):\n new_value = '\\'{0}\\''.format(diff_dict[p]['new'])\n elif isinstance(diff_dict[p]['new'], list):\n new_value = '\\'{0}\\''.format(', '.join(diff_dict[p]['new']))\n changes_strings.append('{0} from {1} to {2}'.format(\n p, old_value, new_value))\n else:\n sub_changes = cls._get_changes(diff_dict[p])\n if sub_changes:\n changes_strings.append('{0}:'.format(p))\n changes_strings.extend([' {0}'.format(c)\n for c in sub_changes])\n return changes_strings\n"
] |
class RecursiveDictDiffer(DictDiffer):
'''
Calculates a recursive diff between the current_dict and the past_dict
creating a diff in the format
{'new': new_value, 'old': old_value}
It recursively searches differences in common keys whose values are
dictionaries creating a diff dict in the format
{'common_key' : {'new': new_value, 'old': old_value}
The class overrides all DictDiffer methods, returning lists of keys and
subkeys using the . notation (i.e 'common_key1.common_key2.changed_key')
The class provides access to:
(1) the added, removed, changes keys and subkeys (using the . notation)
``added``, ``removed``, ``changed`` methods
(2) the diffs in the format above (diff property)
``diffs`` property
(3) a dict with the new changed values only (new_values property)
``new_values`` property
(4) a dict with the old changed values only (old_values property)
``old_values`` property
(5) a string representation of the changes in the format:
``changes_str`` property
Note:
The <_null_> value is a reserved value
.. code-block:: text
common_key1:
common_key2:
changed_key1 from '<old_str>' to '<new_str>'
changed_key2 from '[<old_elem1>, ..]' to '[<new_elem1>, ..]'
common_key3:
changed_key3 from <old_int> to <new_int>
'''
NONE_VALUE = '<_null_>'
def __init__(self, past_dict, current_dict, ignore_missing_keys):
'''
past_dict
Past dictionary.
current_dict
Current dictionary.
ignore_missing_keys
Flag specifying whether to ignore keys that no longer exist in the
current_dict, but exist in the past_dict. If true, the diff will
not contain the missing keys.
'''
super(RecursiveDictDiffer, self).__init__(current_dict, past_dict)
self._diffs = \
self._get_diffs(self.current_dict, self.past_dict,
ignore_missing_keys)
# Ignores unet values when assessing the changes
self.ignore_unset_values = True
@classmethod
def _get_diffs(cls, dict1, dict2, ignore_missing_keys):
'''
Returns a dict with the differences between dict1 and dict2
Notes:
Keys that only exist in dict2 are not included in the diff if
ignore_missing_keys is True, otherwise they are
Simple compares are done on lists
'''
ret_dict = {}
for p in dict1.keys():
if p not in dict2:
ret_dict.update({p: {'new': dict1[p], 'old': cls.NONE_VALUE}})
elif dict1[p] != dict2[p]:
if isinstance(dict1[p], dict) and isinstance(dict2[p], dict):
sub_diff_dict = cls._get_diffs(dict1[p], dict2[p],
ignore_missing_keys)
if sub_diff_dict:
ret_dict.update({p: sub_diff_dict})
else:
ret_dict.update({p: {'new': dict1[p], 'old': dict2[p]}})
if not ignore_missing_keys:
for p in dict2.keys():
if p not in dict1.keys():
ret_dict.update({p: {'new': cls.NONE_VALUE,
'old': dict2[p]}})
return ret_dict
@classmethod
def _get_values(cls, diff_dict, type='new'):
'''
Returns a dictionaries with the 'new' values in a diff dict.
type
Which values to return, 'new' or 'old'
'''
ret_dict = {}
for p in diff_dict.keys():
if type in diff_dict[p].keys():
ret_dict.update({p: diff_dict[p][type]})
else:
ret_dict.update(
{p: cls._get_values(diff_dict[p], type=type)})
return ret_dict
@classmethod
def added(self):
'''
Returns all keys that have been added.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _added(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_added(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
elif diffs[key]['old'] == self.NONE_VALUE:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_added(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
return keys
return sorted(_added(self._diffs, prefix=''))
def removed(self):
'''
Returns all keys that have been removed.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _removed(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_removed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
elif diffs[key]['new'] == self.NONE_VALUE:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key]['new'], dict):
keys.extend(
_removed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_removed(self._diffs, prefix=''))
def changed(self):
'''
Returns all keys that have been changed.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _changed(diffs, prefix):
keys = []
for key in diffs.keys():
if not isinstance(diffs[key], dict):
continue
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
continue
if self.ignore_unset_values:
if 'old' in diffs[key] and 'new' in diffs[key] and \
diffs[key]['old'] != self.NONE_VALUE and \
diffs[key]['new'] != self.NONE_VALUE:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_changed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key], dict):
keys.extend(
_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
else:
if 'old' in diffs[key] and 'new' in diffs[key]:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_changed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key], dict):
keys.extend(
_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_changed(self._diffs, prefix=''))
def unchanged(self):
'''
Returns all keys that have been unchanged.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _unchanged(current_dict, diffs, prefix):
keys = []
for key in current_dict.keys():
if key not in diffs:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(current_dict[key], dict):
if 'new' in diffs[key]:
# There is a diff
continue
else:
keys.extend(
_unchanged(current_dict[key],
diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_unchanged(self.current_dict, self._diffs, prefix=''))
@property
def diffs(self):
'''Returns a dict with the recursive diffs current_dict - past_dict'''
return self._diffs
@property
def new_values(self):
'''Returns a dictionary with the new values'''
return self._get_values(self._diffs, type='new')
@property
def old_values(self):
'''Returns a dictionary with the old values'''
return self._get_values(self._diffs, type='old')
@property
def changes_str(self):
'''Returns a string describing the changes'''
return '\n'.join(self._get_changes(self._diffs))
|
saltstack/salt
|
salt/utils/dictdiffer.py
|
RecursiveDictDiffer.added
|
python
|
def added(self):
'''
Returns all keys that have been added.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _added(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_added(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
elif diffs[key]['old'] == self.NONE_VALUE:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_added(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
return keys
return sorted(_added(self._diffs, prefix=''))
|
Returns all keys that have been added.
If the keys are in child dictionaries they will be represented with
. notation
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dictdiffer.py#L247-L269
|
[
"def _added(diffs, prefix):\n keys = []\n for key in diffs.keys():\n if isinstance(diffs[key], dict) and 'old' not in diffs[key]:\n keys.extend(_added(diffs[key],\n prefix='{0}{1}.'.format(prefix, key)))\n elif diffs[key]['old'] == self.NONE_VALUE:\n if isinstance(diffs[key]['new'], dict):\n keys.extend(\n _added(diffs[key]['new'],\n prefix='{0}{1}.'.format(prefix, key)))\n else:\n keys.append('{0}{1}'.format(prefix, key))\n return keys\n"
] |
class RecursiveDictDiffer(DictDiffer):
'''
Calculates a recursive diff between the current_dict and the past_dict
creating a diff in the format
{'new': new_value, 'old': old_value}
It recursively searches differences in common keys whose values are
dictionaries creating a diff dict in the format
{'common_key' : {'new': new_value, 'old': old_value}
The class overrides all DictDiffer methods, returning lists of keys and
subkeys using the . notation (i.e 'common_key1.common_key2.changed_key')
The class provides access to:
(1) the added, removed, changes keys and subkeys (using the . notation)
``added``, ``removed``, ``changed`` methods
(2) the diffs in the format aboce (diff property)
``diffs`` property
(3) a dict with the new changed values only (new_values property)
``new_values`` property
(4) a dict with the old changed values only (old_values property)
``old_values`` property
(5) a string representation of the changes in the format:
``changes_str`` property
Note:
The <_null_> value is a reserved value
.. code-block:: text
common_key1:
common_key2:
changed_key1 from '<old_str>' to '<new_str>'
changed_key2 from '[<old_elem1>, ..]' to '[<new_elem1>, ..]'
common_key3:
changed_key3 from <old_int> to <new_int>
'''
NONE_VALUE = '<_null_>'
def __init__(self, past_dict, current_dict, ignore_missing_keys):
'''
past_dict
Past dictionary.
current_dict
Current dictionary.
ignore_missing_keys
Flag specifying whether to ignore keys that no longer exist in the
current_dict, but exist in the past_dict. If true, the diff will
not contain the missing keys.
'''
super(RecursiveDictDiffer, self).__init__(current_dict, past_dict)
self._diffs = \
self._get_diffs(self.current_dict, self.past_dict,
ignore_missing_keys)
# Ignores unet values when assessing the changes
self.ignore_unset_values = True
@classmethod
def _get_diffs(cls, dict1, dict2, ignore_missing_keys):
'''
Returns a dict with the differences between dict1 and dict2
Notes:
Keys that only exist in dict2 are not included in the diff if
ignore_missing_keys is True, otherwise they are
Simple compares are done on lists
'''
ret_dict = {}
for p in dict1.keys():
if p not in dict2:
ret_dict.update({p: {'new': dict1[p], 'old': cls.NONE_VALUE}})
elif dict1[p] != dict2[p]:
if isinstance(dict1[p], dict) and isinstance(dict2[p], dict):
sub_diff_dict = cls._get_diffs(dict1[p], dict2[p],
ignore_missing_keys)
if sub_diff_dict:
ret_dict.update({p: sub_diff_dict})
else:
ret_dict.update({p: {'new': dict1[p], 'old': dict2[p]}})
if not ignore_missing_keys:
for p in dict2.keys():
if p not in dict1.keys():
ret_dict.update({p: {'new': cls.NONE_VALUE,
'old': dict2[p]}})
return ret_dict
@classmethod
def _get_values(cls, diff_dict, type='new'):
'''
Returns a dictionaries with the 'new' values in a diff dict.
type
Which values to return, 'new' or 'old'
'''
ret_dict = {}
for p in diff_dict.keys():
if type in diff_dict[p].keys():
ret_dict.update({p: diff_dict[p][type]})
else:
ret_dict.update(
{p: cls._get_values(diff_dict[p], type=type)})
return ret_dict
@classmethod
def _get_changes(cls, diff_dict):
'''
Returns a list of string message with the differences in a diff dict.
Each inner difference is tabulated two space deeper
'''
changes_strings = []
for p in sorted(diff_dict.keys()):
if sorted(diff_dict[p].keys()) == ['new', 'old']:
# Some string formatting
old_value = diff_dict[p]['old']
if diff_dict[p]['old'] == cls.NONE_VALUE:
old_value = 'nothing'
elif isinstance(diff_dict[p]['old'], six.string_types):
old_value = '\'{0}\''.format(diff_dict[p]['old'])
elif isinstance(diff_dict[p]['old'], list):
old_value = '\'{0}\''.format(
', '.join(diff_dict[p]['old']))
new_value = diff_dict[p]['new']
if diff_dict[p]['new'] == cls.NONE_VALUE:
new_value = 'nothing'
elif isinstance(diff_dict[p]['new'], six.string_types):
new_value = '\'{0}\''.format(diff_dict[p]['new'])
elif isinstance(diff_dict[p]['new'], list):
new_value = '\'{0}\''.format(', '.join(diff_dict[p]['new']))
changes_strings.append('{0} from {1} to {2}'.format(
p, old_value, new_value))
else:
sub_changes = cls._get_changes(diff_dict[p])
if sub_changes:
changes_strings.append('{0}:'.format(p))
changes_strings.extend([' {0}'.format(c)
for c in sub_changes])
return changes_strings
def removed(self):
'''
Returns all keys that have been removed.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _removed(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_removed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
elif diffs[key]['new'] == self.NONE_VALUE:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key]['new'], dict):
keys.extend(
_removed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_removed(self._diffs, prefix=''))
def changed(self):
'''
Returns all keys that have been changed.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _changed(diffs, prefix):
keys = []
for key in diffs.keys():
if not isinstance(diffs[key], dict):
continue
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
continue
if self.ignore_unset_values:
if 'old' in diffs[key] and 'new' in diffs[key] and \
diffs[key]['old'] != self.NONE_VALUE and \
diffs[key]['new'] != self.NONE_VALUE:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_changed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key], dict):
keys.extend(
_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
else:
if 'old' in diffs[key] and 'new' in diffs[key]:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_changed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key], dict):
keys.extend(
_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_changed(self._diffs, prefix=''))
def unchanged(self):
'''
Returns all keys that have been unchanged.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _unchanged(current_dict, diffs, prefix):
keys = []
for key in current_dict.keys():
if key not in diffs:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(current_dict[key], dict):
if 'new' in diffs[key]:
# There is a diff
continue
else:
keys.extend(
_unchanged(current_dict[key],
diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_unchanged(self.current_dict, self._diffs, prefix=''))
@property
def diffs(self):
'''Returns a dict with the recursive diffs current_dict - past_dict'''
return self._diffs
@property
def new_values(self):
'''Returns a dictionary with the new values'''
return self._get_values(self._diffs, type='new')
@property
def old_values(self):
'''Returns a dictionary with the old values'''
return self._get_values(self._diffs, type='old')
@property
def changes_str(self):
'''Returns a string describing the changes'''
return '\n'.join(self._get_changes(self._diffs))
|
saltstack/salt
|
salt/utils/dictdiffer.py
|
RecursiveDictDiffer.removed
|
python
|
def removed(self):
'''
Returns all keys that have been removed.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _removed(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_removed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
elif diffs[key]['new'] == self.NONE_VALUE:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key]['new'], dict):
keys.extend(
_removed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_removed(self._diffs, prefix=''))
|
Returns all keys that have been removed.
If the keys are in child dictionaries they will be represented with
. notation
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dictdiffer.py#L271-L292
|
[
"def _removed(diffs, prefix):\n keys = []\n for key in diffs.keys():\n if isinstance(diffs[key], dict) and 'old' not in diffs[key]:\n keys.extend(_removed(diffs[key],\n prefix='{0}{1}.'.format(prefix, key)))\n elif diffs[key]['new'] == self.NONE_VALUE:\n keys.append('{0}{1}'.format(prefix, key))\n elif isinstance(diffs[key]['new'], dict):\n keys.extend(\n _removed(diffs[key]['new'],\n prefix='{0}{1}.'.format(prefix, key)))\n return keys\n"
] |
class RecursiveDictDiffer(DictDiffer):
'''
Calculates a recursive diff between the current_dict and the past_dict
creating a diff in the format
{'new': new_value, 'old': old_value}
It recursively searches differences in common keys whose values are
dictionaries creating a diff dict in the format
{'common_key' : {'new': new_value, 'old': old_value}
The class overrides all DictDiffer methods, returning lists of keys and
subkeys using the . notation (i.e 'common_key1.common_key2.changed_key')
The class provides access to:
(1) the added, removed, changes keys and subkeys (using the . notation)
``added``, ``removed``, ``changed`` methods
(2) the diffs in the format aboce (diff property)
``diffs`` property
(3) a dict with the new changed values only (new_values property)
``new_values`` property
(4) a dict with the old changed values only (old_values property)
``old_values`` property
(5) a string representation of the changes in the format:
``changes_str`` property
Note:
The <_null_> value is a reserved value
.. code-block:: text
common_key1:
common_key2:
changed_key1 from '<old_str>' to '<new_str>'
changed_key2 from '[<old_elem1>, ..]' to '[<new_elem1>, ..]'
common_key3:
changed_key3 from <old_int> to <new_int>
'''
NONE_VALUE = '<_null_>'
def __init__(self, past_dict, current_dict, ignore_missing_keys):
'''
past_dict
Past dictionary.
current_dict
Current dictionary.
ignore_missing_keys
Flag specifying whether to ignore keys that no longer exist in the
current_dict, but exist in the past_dict. If true, the diff will
not contain the missing keys.
'''
super(RecursiveDictDiffer, self).__init__(current_dict, past_dict)
self._diffs = \
self._get_diffs(self.current_dict, self.past_dict,
ignore_missing_keys)
# Ignores unet values when assessing the changes
self.ignore_unset_values = True
@classmethod
def _get_diffs(cls, dict1, dict2, ignore_missing_keys):
'''
Returns a dict with the differences between dict1 and dict2
Notes:
Keys that only exist in dict2 are not included in the diff if
ignore_missing_keys is True, otherwise they are
Simple compares are done on lists
'''
ret_dict = {}
for p in dict1.keys():
if p not in dict2:
ret_dict.update({p: {'new': dict1[p], 'old': cls.NONE_VALUE}})
elif dict1[p] != dict2[p]:
if isinstance(dict1[p], dict) and isinstance(dict2[p], dict):
sub_diff_dict = cls._get_diffs(dict1[p], dict2[p],
ignore_missing_keys)
if sub_diff_dict:
ret_dict.update({p: sub_diff_dict})
else:
ret_dict.update({p: {'new': dict1[p], 'old': dict2[p]}})
if not ignore_missing_keys:
for p in dict2.keys():
if p not in dict1.keys():
ret_dict.update({p: {'new': cls.NONE_VALUE,
'old': dict2[p]}})
return ret_dict
@classmethod
def _get_values(cls, diff_dict, type='new'):
'''
Returns a dictionaries with the 'new' values in a diff dict.
type
Which values to return, 'new' or 'old'
'''
ret_dict = {}
for p in diff_dict.keys():
if type in diff_dict[p].keys():
ret_dict.update({p: diff_dict[p][type]})
else:
ret_dict.update(
{p: cls._get_values(diff_dict[p], type=type)})
return ret_dict
@classmethod
def _get_changes(cls, diff_dict):
'''
Returns a list of string message with the differences in a diff dict.
Each inner difference is tabulated two space deeper
'''
changes_strings = []
for p in sorted(diff_dict.keys()):
if sorted(diff_dict[p].keys()) == ['new', 'old']:
# Some string formatting
old_value = diff_dict[p]['old']
if diff_dict[p]['old'] == cls.NONE_VALUE:
old_value = 'nothing'
elif isinstance(diff_dict[p]['old'], six.string_types):
old_value = '\'{0}\''.format(diff_dict[p]['old'])
elif isinstance(diff_dict[p]['old'], list):
old_value = '\'{0}\''.format(
', '.join(diff_dict[p]['old']))
new_value = diff_dict[p]['new']
if diff_dict[p]['new'] == cls.NONE_VALUE:
new_value = 'nothing'
elif isinstance(diff_dict[p]['new'], six.string_types):
new_value = '\'{0}\''.format(diff_dict[p]['new'])
elif isinstance(diff_dict[p]['new'], list):
new_value = '\'{0}\''.format(', '.join(diff_dict[p]['new']))
changes_strings.append('{0} from {1} to {2}'.format(
p, old_value, new_value))
else:
sub_changes = cls._get_changes(diff_dict[p])
if sub_changes:
changes_strings.append('{0}:'.format(p))
changes_strings.extend([' {0}'.format(c)
for c in sub_changes])
return changes_strings
def added(self):
'''
Returns all keys that have been added.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _added(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_added(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
elif diffs[key]['old'] == self.NONE_VALUE:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_added(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
return keys
return sorted(_added(self._diffs, prefix=''))
def changed(self):
'''
Returns all keys that have been changed.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _changed(diffs, prefix):
keys = []
for key in diffs.keys():
if not isinstance(diffs[key], dict):
continue
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
continue
if self.ignore_unset_values:
if 'old' in diffs[key] and 'new' in diffs[key] and \
diffs[key]['old'] != self.NONE_VALUE and \
diffs[key]['new'] != self.NONE_VALUE:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_changed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key], dict):
keys.extend(
_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
else:
if 'old' in diffs[key] and 'new' in diffs[key]:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_changed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key], dict):
keys.extend(
_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_changed(self._diffs, prefix=''))
def unchanged(self):
'''
Returns all keys that have been unchanged.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _unchanged(current_dict, diffs, prefix):
keys = []
for key in current_dict.keys():
if key not in diffs:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(current_dict[key], dict):
if 'new' in diffs[key]:
# There is a diff
continue
else:
keys.extend(
_unchanged(current_dict[key],
diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_unchanged(self.current_dict, self._diffs, prefix=''))
@property
def diffs(self):
'''Returns a dict with the recursive diffs current_dict - past_dict'''
return self._diffs
@property
def new_values(self):
'''Returns a dictionary with the new values'''
return self._get_values(self._diffs, type='new')
@property
def old_values(self):
'''Returns a dictionary with the old values'''
return self._get_values(self._diffs, type='old')
@property
def changes_str(self):
'''Returns a string describing the changes'''
return '\n'.join(self._get_changes(self._diffs))
|
saltstack/salt
|
salt/utils/dictdiffer.py
|
RecursiveDictDiffer.changed
|
python
|
def changed(self):
'''
Returns all keys that have been changed.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _changed(diffs, prefix):
keys = []
for key in diffs.keys():
if not isinstance(diffs[key], dict):
continue
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
continue
if self.ignore_unset_values:
if 'old' in diffs[key] and 'new' in diffs[key] and \
diffs[key]['old'] != self.NONE_VALUE and \
diffs[key]['new'] != self.NONE_VALUE:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_changed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key], dict):
keys.extend(
_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
else:
if 'old' in diffs[key] and 'new' in diffs[key]:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_changed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key], dict):
keys.extend(
_changed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_changed(self._diffs, prefix=''))
|
Returns all keys that have been changed.
If the keys are in child dictionaries they will be represented with
. notation
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dictdiffer.py#L294-L340
|
[
"def _changed(diffs, prefix):\n keys = []\n for key in diffs.keys():\n if not isinstance(diffs[key], dict):\n continue\n\n if isinstance(diffs[key], dict) and 'old' not in diffs[key]:\n keys.extend(_changed(diffs[key],\n prefix='{0}{1}.'.format(prefix, key)))\n continue\n if self.ignore_unset_values:\n if 'old' in diffs[key] and 'new' in diffs[key] and \\\n diffs[key]['old'] != self.NONE_VALUE and \\\n diffs[key]['new'] != self.NONE_VALUE:\n if isinstance(diffs[key]['new'], dict):\n keys.extend(\n _changed(diffs[key]['new'],\n prefix='{0}{1}.'.format(prefix, key)))\n else:\n keys.append('{0}{1}'.format(prefix, key))\n elif isinstance(diffs[key], dict):\n keys.extend(\n _changed(diffs[key],\n prefix='{0}{1}.'.format(prefix, key)))\n else:\n if 'old' in diffs[key] and 'new' in diffs[key]:\n if isinstance(diffs[key]['new'], dict):\n keys.extend(\n _changed(diffs[key]['new'],\n prefix='{0}{1}.'.format(prefix, key)))\n else:\n keys.append('{0}{1}'.format(prefix, key))\n elif isinstance(diffs[key], dict):\n keys.extend(\n _changed(diffs[key],\n prefix='{0}{1}.'.format(prefix, key)))\n\n return keys\n"
] |
class RecursiveDictDiffer(DictDiffer):
'''
Calculates a recursive diff between the current_dict and the past_dict
creating a diff in the format
{'new': new_value, 'old': old_value}
It recursively searches differences in common keys whose values are
dictionaries creating a diff dict in the format
{'common_key' : {'new': new_value, 'old': old_value}
The class overrides all DictDiffer methods, returning lists of keys and
subkeys using the . notation (i.e 'common_key1.common_key2.changed_key')
The class provides access to:
(1) the added, removed, changes keys and subkeys (using the . notation)
``added``, ``removed``, ``changed`` methods
(2) the diffs in the format aboce (diff property)
``diffs`` property
(3) a dict with the new changed values only (new_values property)
``new_values`` property
(4) a dict with the old changed values only (old_values property)
``old_values`` property
(5) a string representation of the changes in the format:
``changes_str`` property
Note:
The <_null_> value is a reserved value
.. code-block:: text
common_key1:
common_key2:
changed_key1 from '<old_str>' to '<new_str>'
changed_key2 from '[<old_elem1>, ..]' to '[<new_elem1>, ..]'
common_key3:
changed_key3 from <old_int> to <new_int>
'''
NONE_VALUE = '<_null_>'
def __init__(self, past_dict, current_dict, ignore_missing_keys):
'''
past_dict
Past dictionary.
current_dict
Current dictionary.
ignore_missing_keys
Flag specifying whether to ignore keys that no longer exist in the
current_dict, but exist in the past_dict. If true, the diff will
not contain the missing keys.
'''
super(RecursiveDictDiffer, self).__init__(current_dict, past_dict)
self._diffs = \
self._get_diffs(self.current_dict, self.past_dict,
ignore_missing_keys)
# Ignores unet values when assessing the changes
self.ignore_unset_values = True
@classmethod
def _get_diffs(cls, dict1, dict2, ignore_missing_keys):
'''
Returns a dict with the differences between dict1 and dict2
Notes:
Keys that only exist in dict2 are not included in the diff if
ignore_missing_keys is True, otherwise they are
Simple compares are done on lists
'''
ret_dict = {}
for p in dict1.keys():
if p not in dict2:
ret_dict.update({p: {'new': dict1[p], 'old': cls.NONE_VALUE}})
elif dict1[p] != dict2[p]:
if isinstance(dict1[p], dict) and isinstance(dict2[p], dict):
sub_diff_dict = cls._get_diffs(dict1[p], dict2[p],
ignore_missing_keys)
if sub_diff_dict:
ret_dict.update({p: sub_diff_dict})
else:
ret_dict.update({p: {'new': dict1[p], 'old': dict2[p]}})
if not ignore_missing_keys:
for p in dict2.keys():
if p not in dict1.keys():
ret_dict.update({p: {'new': cls.NONE_VALUE,
'old': dict2[p]}})
return ret_dict
@classmethod
def _get_values(cls, diff_dict, type='new'):
'''
Returns a dictionaries with the 'new' values in a diff dict.
type
Which values to return, 'new' or 'old'
'''
ret_dict = {}
for p in diff_dict.keys():
if type in diff_dict[p].keys():
ret_dict.update({p: diff_dict[p][type]})
else:
ret_dict.update(
{p: cls._get_values(diff_dict[p], type=type)})
return ret_dict
@classmethod
def _get_changes(cls, diff_dict):
'''
Returns a list of string message with the differences in a diff dict.
Each inner difference is tabulated two space deeper
'''
changes_strings = []
for p in sorted(diff_dict.keys()):
if sorted(diff_dict[p].keys()) == ['new', 'old']:
# Some string formatting
old_value = diff_dict[p]['old']
if diff_dict[p]['old'] == cls.NONE_VALUE:
old_value = 'nothing'
elif isinstance(diff_dict[p]['old'], six.string_types):
old_value = '\'{0}\''.format(diff_dict[p]['old'])
elif isinstance(diff_dict[p]['old'], list):
old_value = '\'{0}\''.format(
', '.join(diff_dict[p]['old']))
new_value = diff_dict[p]['new']
if diff_dict[p]['new'] == cls.NONE_VALUE:
new_value = 'nothing'
elif isinstance(diff_dict[p]['new'], six.string_types):
new_value = '\'{0}\''.format(diff_dict[p]['new'])
elif isinstance(diff_dict[p]['new'], list):
new_value = '\'{0}\''.format(', '.join(diff_dict[p]['new']))
changes_strings.append('{0} from {1} to {2}'.format(
p, old_value, new_value))
else:
sub_changes = cls._get_changes(diff_dict[p])
if sub_changes:
changes_strings.append('{0}:'.format(p))
changes_strings.extend([' {0}'.format(c)
for c in sub_changes])
return changes_strings
def added(self):
'''
Returns all keys that have been added.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _added(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_added(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
elif diffs[key]['old'] == self.NONE_VALUE:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_added(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
return keys
return sorted(_added(self._diffs, prefix=''))
def removed(self):
'''
Returns all keys that have been removed.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _removed(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_removed(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
elif diffs[key]['new'] == self.NONE_VALUE:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(diffs[key]['new'], dict):
keys.extend(
_removed(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_removed(self._diffs, prefix=''))
def unchanged(self):
'''
Returns all keys that have been unchanged.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _unchanged(current_dict, diffs, prefix):
keys = []
for key in current_dict.keys():
if key not in diffs:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(current_dict[key], dict):
if 'new' in diffs[key]:
# There is a diff
continue
else:
keys.extend(
_unchanged(current_dict[key],
diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_unchanged(self.current_dict, self._diffs, prefix=''))
@property
def diffs(self):
'''Returns a dict with the recursive diffs current_dict - past_dict'''
return self._diffs
@property
def new_values(self):
'''Returns a dictionary with the new values'''
return self._get_values(self._diffs, type='new')
@property
def old_values(self):
'''Returns a dictionary with the old values'''
return self._get_values(self._diffs, type='old')
@property
def changes_str(self):
'''Returns a string describing the changes'''
return '\n'.join(self._get_changes(self._diffs))
|
saltstack/salt
|
salt/utils/dictdiffer.py
|
RecursiveDictDiffer.unchanged
|
python
|
def unchanged(self):
'''
Returns all keys that have been unchanged.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _unchanged(current_dict, diffs, prefix):
keys = []
for key in current_dict.keys():
if key not in diffs:
keys.append('{0}{1}'.format(prefix, key))
elif isinstance(current_dict[key], dict):
if 'new' in diffs[key]:
# There is a diff
continue
else:
keys.extend(
_unchanged(current_dict[key],
diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
return keys
return sorted(_unchanged(self.current_dict, self._diffs, prefix=''))
|
Returns all keys that have been unchanged.
If the keys are in child dictionaries they will be represented with
. notation
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/dictdiffer.py#L342-L365
|
[
"def _unchanged(current_dict, diffs, prefix):\n keys = []\n for key in current_dict.keys():\n if key not in diffs:\n keys.append('{0}{1}'.format(prefix, key))\n elif isinstance(current_dict[key], dict):\n if 'new' in diffs[key]:\n # There is a diff\n continue\n else:\n keys.extend(\n _unchanged(current_dict[key],\n diffs[key],\n prefix='{0}{1}.'.format(prefix, key)))\n\n return keys\n"
] |
class RecursiveDictDiffer(DictDiffer):
'''
Calculates a recursive diff between the current_dict and the past_dict
creating a diff in the format
{'new': new_value, 'old': old_value}
It recursively searches differences in common keys whose values are
dictionaries creating a diff dict in the format
{'common_key' : {'new': new_value, 'old': old_value}
The class overrides all DictDiffer methods, returning lists of keys and
subkeys using the . notation (i.e 'common_key1.common_key2.changed_key')
The class provides access to:
(1) the added, removed, changes keys and subkeys (using the . notation)
``added``, ``removed``, ``changed`` methods
(2) the diffs in the format aboce (diff property)
``diffs`` property
(3) a dict with the new changed values only (new_values property)
``new_values`` property
(4) a dict with the old changed values only (old_values property)
``old_values`` property
(5) a string representation of the changes in the format:
``changes_str`` property
Note:
The <_null_> value is a reserved value
.. code-block:: text
common_key1:
common_key2:
changed_key1 from '<old_str>' to '<new_str>'
changed_key2 from '[<old_elem1>, ..]' to '[<new_elem1>, ..]'
common_key3:
changed_key3 from <old_int> to <new_int>
'''
NONE_VALUE = '<_null_>'
def __init__(self, past_dict, current_dict, ignore_missing_keys):
'''
past_dict
Past dictionary.
current_dict
Current dictionary.
ignore_missing_keys
Flag specifying whether to ignore keys that no longer exist in the
current_dict, but exist in the past_dict. If true, the diff will
not contain the missing keys.
'''
super(RecursiveDictDiffer, self).__init__(current_dict, past_dict)
self._diffs = \
self._get_diffs(self.current_dict, self.past_dict,
ignore_missing_keys)
# Ignores unet values when assessing the changes
self.ignore_unset_values = True
@classmethod
def _get_diffs(cls, dict1, dict2, ignore_missing_keys):
'''
Returns a dict with the differences between dict1 and dict2
Notes:
Keys that only exist in dict2 are not included in the diff if
ignore_missing_keys is True, otherwise they are
Simple compares are done on lists
'''
ret_dict = {}
for p in dict1.keys():
if p not in dict2:
ret_dict.update({p: {'new': dict1[p], 'old': cls.NONE_VALUE}})
elif dict1[p] != dict2[p]:
if isinstance(dict1[p], dict) and isinstance(dict2[p], dict):
sub_diff_dict = cls._get_diffs(dict1[p], dict2[p],
ignore_missing_keys)
if sub_diff_dict:
ret_dict.update({p: sub_diff_dict})
else:
ret_dict.update({p: {'new': dict1[p], 'old': dict2[p]}})
if not ignore_missing_keys:
for p in dict2.keys():
if p not in dict1.keys():
ret_dict.update({p: {'new': cls.NONE_VALUE,
'old': dict2[p]}})
return ret_dict
@classmethod
def _get_values(cls, diff_dict, type='new'):
    '''
    Collect one side of a diff dict, recursing into nested diffs.

    type
        Which side to collect, 'new' or 'old'.
    '''
    values = {}
    for key, entry in diff_dict.items():
        if type in entry.keys():
            # Leaf entry: take the requested side directly.
            values[key] = entry[type]
        else:
            # Nested diff node: descend.
            values[key] = cls._get_values(entry, type=type)
    return values
@classmethod
def _get_changes(cls, diff_dict):
    '''
    Render a diff dict as a list of human-readable change lines.

    Nested diffs are indented two extra spaces per level.
    '''
    def _render(value):
        # Format a single old/new value the way the report expects:
        # sentinel -> 'nothing', strings quoted, lists joined and quoted,
        # everything else passed through for default str() formatting.
        if value == cls.NONE_VALUE:
            return 'nothing'
        if isinstance(value, six.string_types):
            return '\'{0}\''.format(value)
        if isinstance(value, list):
            return '\'{0}\''.format(', '.join(value))
        return value

    lines = []
    for key in sorted(diff_dict.keys()):
        entry = diff_dict[key]
        if sorted(entry.keys()) == ['new', 'old']:
            # Leaf change: a single "from X to Y" line.
            lines.append('{0} from {1} to {2}'.format(
                key, _render(entry['old']), _render(entry['new'])))
        else:
            # Nested diff: header line plus indented child lines.
            nested = cls._get_changes(entry)
            if nested:
                lines.append('{0}:'.format(key))
                lines.extend(['  {0}'.format(line) for line in nested])
    return lines
def added(self):
    '''
    Return the sorted list of keys added in current_dict, using dotted
    notation for keys nested inside child dictionaries.
    '''
    def _walk(diffs, prefix):
        found = []
        for key in diffs:
            entry = diffs[key]
            dotted = '{0}{1}.'.format(prefix, key)
            if isinstance(entry, dict) and 'old' not in entry:
                # Nested diff node: descend.
                found.extend(_walk(entry, prefix=dotted))
            elif entry['old'] == self.NONE_VALUE:
                if isinstance(entry['new'], dict):
                    # A whole subtree was added; report its leaves.
                    found.extend(_walk(entry['new'], prefix=dotted))
                else:
                    found.append('{0}{1}'.format(prefix, key))
        return found
    return sorted(_walk(self._diffs, prefix=''))
def removed(self):
    '''
    Return the sorted list of keys removed from past_dict, using dotted
    notation for keys nested inside child dictionaries.
    '''
    def _walk(diffs, prefix):
        found = []
        for key in diffs:
            entry = diffs[key]
            dotted = '{0}{1}.'.format(prefix, key)
            if isinstance(entry, dict) and 'old' not in entry:
                # Nested diff node: descend.
                found.extend(_walk(entry, prefix=dotted))
            elif entry['new'] == self.NONE_VALUE:
                found.append('{0}{1}'.format(prefix, key))
            elif isinstance(entry['new'], dict):
                # A whole subtree was replaced; inspect its leaves.
                found.extend(_walk(entry['new'], prefix=dotted))
        return found
    return sorted(_walk(self._diffs, prefix=''))
def changed(self):
    '''
    Return the sorted list of keys whose values changed, using dotted
    notation for keys nested inside child dictionaries.

    When ``ignore_unset_values`` is set, keys whose old or new value is
    the NONE_VALUE sentinel (added/removed keys) are skipped.
    '''
    def _walk(diffs, prefix):
        found = []
        for key in diffs:
            entry = diffs[key]
            if not isinstance(entry, dict):
                continue
            dotted = '{0}{1}.'.format(prefix, key)
            if 'old' not in entry:
                # Nested diff node: descend.
                found.extend(_walk(entry, prefix=dotted))
                continue
            if self.ignore_unset_values:
                if 'old' in entry and 'new' in entry and \
                        entry['old'] != self.NONE_VALUE and \
                        entry['new'] != self.NONE_VALUE:
                    if isinstance(entry['new'], dict):
                        found.extend(_walk(entry['new'], prefix=dotted))
                    else:
                        found.append('{0}{1}'.format(prefix, key))
                elif isinstance(entry, dict):
                    found.extend(_walk(entry, prefix=dotted))
            else:
                if 'old' in entry and 'new' in entry:
                    if isinstance(entry['new'], dict):
                        found.extend(_walk(entry['new'], prefix=dotted))
                    else:
                        found.append('{0}{1}'.format(prefix, key))
                elif isinstance(entry, dict):
                    found.extend(_walk(entry, prefix=dotted))
        return found
    return sorted(_walk(self._diffs, prefix=''))
@property
def diffs(self):
    '''The nested diff dict (current_dict compared against past_dict) computed in __init__.'''
    return self._diffs
@property
def new_values(self):
    '''A dict containing only the changed keys, mapped to their new values.'''
    return self._get_values(self._diffs, type='new')
@property
def old_values(self):
    '''A dict containing only the changed keys, mapped to their old values.'''
    return self._get_values(self._diffs, type='old')
@property
def changes_str(self):
    '''A multi-line, human-readable description of all changes.'''
    return '\n'.join(self._get_changes(self._diffs))
|
saltstack/salt
|
salt/modules/win_groupadd.py
|
_get_computer_object
|
python
|
def _get_computer_object():
    '''
    Return the ADSI computer object for the local machine.

    Returns:
        object: The WinNT computer object for the local host ('.')
    '''
    with salt.utils.winapi.Com():
        dispatcher = win32com.client.Dispatch('AdsNameSpaces')
        return dispatcher.GetObject('', 'WinNT://.,computer')
|
A helper function to get the object for the local machine
Returns:
object: Returns the computer object for the local machine
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_groupadd.py#L42-L51
| null |
# -*- coding: utf-8 -*-
'''
Manage groups on Windows
.. important::
If you feel that Salt should be using this module to manage groups on a
minion, and it is using a different module (or gives an error similar to
*'group.info' is not available*), see :ref:`here
<module-provider-override>`.
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import Salt libs
import salt.utils.platform
import salt.utils.win_functions
import salt.utils.winapi
try:
import win32api
import win32com.client
import pywintypes
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'group'
def __virtual__():
    '''
    Load as the ``group`` virtual module only on Windows systems where the
    pywin32 dependencies imported successfully.
    '''
    if not (salt.utils.platform.is_windows() and HAS_DEPENDENCIES):
        return (False, "Module win_groupadd: module only works on Windows systems")
    return __virtualname__
def _get_group_object(name):
    '''
    Return the ADSI object for the named local group.

    Args:
        name (str): The name of the group

    Returns:
        object: The WinNT group object
    '''
    with salt.utils.winapi.Com():
        dispatcher = win32com.client.Dispatch('AdsNameSpaces')
        ads_path = 'WinNT://./{0},group'.format(name)
        return dispatcher.GetObject('', ads_path)
def _get_all_groups():
    '''
    Return an iterable of ADSI group objects for every group on the local
    machine.

    Returns:
        iter: WinNT group objects for all local groups
    '''
    with salt.utils.winapi.Com():
        dispatcher = win32com.client.Dispatch('AdsNameSpaces')
        machine = dispatcher.GetObject('', 'WinNT://.')
        # Restrict enumeration of the machine container to group objects.
        machine.Filter = ['group']
        return machine
def _get_username(member):
'''
Resolve the username from the member object returned from a group query
Returns:
str: The username converted to domain\\username format
'''
return member.ADSPath.replace('WinNT://', '').replace(
'/', '\\')
def add(name, **kwargs):
    '''
    Add the specified group

    Args:
        name (str): The name of the group to add

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' group.add foo
    '''
    if info(name):
        # Guard clause: nothing to create if the group already exists.
        log.warning('The group %s already exists.', name)
        return False

    comp_obj = _get_computer_object()
    try:
        new_group = comp_obj.Create('group', name)
        new_group.SetInfo()
        log.info('Successfully created group %s', name)
    except pywintypes.com_error as exc:
        msg = 'Failed to create group {0}. {1}'.format(
            name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False
    return True
def delete(name, **kwargs):
    '''
    Remove the named group

    Args:
        name (str): The name of the group to remove

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' group.delete foo
    '''
    if not info(name):
        # Guard clause: nothing to do if the group is not present.
        # (Message grammar fixed: was "does not exists.")
        log.warning('The group %s does not exist.', name)
        return False

    comp_obj = _get_computer_object()
    try:
        comp_obj.Delete('group', name)
        log.info('Successfully removed group %s', name)
    except pywintypes.com_error as exc:
        msg = 'Failed to remove group {0}. {1}'.format(
            name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False
    return True
def info(name):
    '''
    Return information about a group

    Args:
        name (str): The name of the group for which to get information

    Returns:
        dict: A dictionary of information about the group, or ``False``
            if the group could not be found or accessed

    CLI Example:

    .. code-block:: bash

        salt '*' group.info foo
    '''
    try:
        group_obj = _get_group_object(name)
        group_name = group_obj.Name
        group_members = [_get_username(x) for x in group_obj.members()]
    except pywintypes.com_error as exc:
        # Debug level only: a missing group is an expected outcome here.
        msg = 'Failed to access group {0}. {1}'.format(
            name, win32api.FormatMessage(exc.excepinfo[5]))
        log.debug(msg)
        return False

    if not group_name:
        return False

    return {'name': group_name,
            'passwd': None,
            'gid': None,
            'members': group_members}
def getent(refresh=False):
    '''
    Return info on all groups

    Args:
        refresh (bool):
            Refresh the info for all groups in ``__context__``. If False only
            the groups in ``__context__`` will be returned. If True the
            ``__context__`` will be refreshed with current data and returned.
            Default is False

    Returns:
        A list of groups and their information

    CLI Example:

    .. code-block:: bash

        salt '*' group.getent
    '''
    if 'group.getent' in __context__ and not refresh:
        # Serve the cached result unless a refresh was requested.
        return __context__['group.getent']

    entries = []
    for group_obj in _get_all_groups():
        entries.append({'gid': __salt__['file.group_to_gid'](group_obj.Name),
                        'members': [_get_username(x)
                                    for x in group_obj.members()],
                        'name': group_obj.Name,
                        'passwd': 'x'})

    __context__['group.getent'] = entries
    return entries
def adduser(name, username, **kwargs):
    '''
    Add a user to a group

    Args:
        name (str): The name of the group to modify
        username (str): The name of the user to add to the group

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' group.adduser foo username
    '''
    try:
        group_obj = _get_group_object(name)
    except pywintypes.com_error as exc:
        msg = 'Failed to access group {0}. {1}'.format(
            name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False

    current_members = [_get_username(x) for x in group_obj.members()]
    # Normalize to DOMAIN\username form before the membership check.
    username = salt.utils.win_functions.get_sam_name(username)

    if username in current_members:
        log.warning('User %s is already a member of %s', username, name)
        return False

    try:
        group_obj.Add('WinNT://' + username.replace('\\', '/'))
        log.info('Added user %s', username)
    except pywintypes.com_error as exc:
        msg = 'Failed to add {0} to group {1}. {2}'.format(
            username, name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False
    return True
def deluser(name, username, **kwargs):
    '''
    Remove a user from a group

    Args:
        name (str): The name of the group to modify
        username (str): The name of the user to remove from the group

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' group.deluser foo username
    '''
    try:
        group_obj = _get_group_object(name)
    except pywintypes.com_error as exc:
        msg = 'Failed to access group {0}. {1}'.format(
            name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False

    current_members = [_get_username(x) for x in group_obj.members()]
    try:
        # get_sam_name stays inside the try so a COM failure in name
        # resolution is reported the same way as a removal failure.
        if salt.utils.win_functions.get_sam_name(username) in current_members:
            group_obj.Remove('WinNT://' + username.replace('\\', '/'))
            log.info('Removed user %s', username)
        else:
            log.warning('User %s is not a member of %s', username, name)
            return False
    except pywintypes.com_error as exc:
        msg = 'Failed to remove {0} from group {1}. {2}'.format(
            username, name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False
    return True
def members(name, members_list, **kwargs):
    '''
    Ensure a group contains only the members in the list

    Args:

        name (str):
            The name of the group to modify

        members_list (str or list):
            A single user, a comma separated list of users, or a Python
            list of users. The group will contain only the users specified
            in this list.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' group.members foo 'user1,user2,user3'
    '''
    # Accept either a comma-separated string or an already-split list.
    # The previous isinstance(members_list, list) check ran *after* the
    # string had been split and so could never fire, and a real list input
    # crashed on .split(); splitting conditionally fixes both.
    if not isinstance(members_list, list):
        members_list = members_list.split(",")
    members_list = [salt.utils.win_functions.get_sam_name(m)
                    for m in members_list]

    try:
        group_obj = _get_group_object(name)
    except pywintypes.com_error as exc:
        # Group probably doesn't exist, but we'll log the error
        msg = 'Failed to access group {0}. {1}'.format(
            name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False

    existing_members = sorted(_get_username(x) for x in group_obj.members())
    members_list = sorted(members_list)

    if existing_members == members_list:
        log.info('%s membership is correct', name)
        return True

    # Add missing users; keep going on individual failures so the group
    # ends up as close to the requested state as possible.
    success = True
    for member in members_list:
        if member not in existing_members:
            try:
                group_obj.Add('WinNT://' + member.replace('\\', '/'))
                log.info('User added: %s', member)
            except pywintypes.com_error as exc:
                msg = 'Failed to add {0} to {1}. {2}'.format(
                    member, name, win32api.FormatMessage(exc.excepinfo[5]))
                log.error(msg)
                success = False

    # Remove users not in members_list
    for member in existing_members:
        if member not in members_list:
            try:
                group_obj.Remove('WinNT://' + member.replace('\\', '/'))
                log.info('User removed: %s', member)
            except pywintypes.com_error as exc:
                msg = 'Failed to remove {0} from {1}. {2}'.format(
                    member, name, win32api.FormatMessage(exc.excepinfo[5]))
                log.error(msg)
                success = False

    return success
def list_groups(refresh=False):
    '''
    Return a list of groups

    Args:
        refresh (bool):
            Refresh the info for all groups in ``__context__``. If False only
            the groups in ``__context__`` will be returned. If True, the
            ``__context__`` will be refreshed with current data and returned.
            Default is False

    Returns:
        list: A list of groups on the machine

    CLI Example:

    .. code-block:: bash

        salt '*' group.list_groups
    '''
    if 'group.list_groups' in __context__ and not refresh:
        # Serve the cached result unless a refresh was requested.
        return __context__['group.list_groups']

    names = [group_obj.Name for group_obj in _get_all_groups()]
    __context__['group.list_groups'] = names
    return names
|
saltstack/salt
|
salt/modules/win_groupadd.py
|
_get_group_object
|
python
|
def _get_group_object(name):
'''
A helper function to get a specified group object
Args:
name (str): The name of the object
Returns:
object: The specified group object
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
return nt.GetObject('', 'WinNT://./' + name + ',group')
|
A helper function to get a specified group object
Args:
name (str): The name of the object
Returns:
object: The specified group object
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_groupadd.py#L54-L67
| null |
# -*- coding: utf-8 -*-
'''
Manage groups on Windows
.. important::
If you feel that Salt should be using this module to manage groups on a
minion, and it is using a different module (or gives an error similar to
*'group.info' is not available*), see :ref:`here
<module-provider-override>`.
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import Salt libs
import salt.utils.platform
import salt.utils.win_functions
import salt.utils.winapi
try:
import win32api
import win32com.client
import pywintypes
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'group'
def __virtual__():
'''
Set the group module if the kernel is Windows
'''
if salt.utils.platform.is_windows() and HAS_DEPENDENCIES:
return __virtualname__
return (False, "Module win_groupadd: module only works on Windows systems")
def _get_computer_object():
'''
A helper function to get the object for the local machine
Returns:
object: Returns the computer object for the local machine
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
return nt.GetObject('', 'WinNT://.,computer')
def _get_all_groups():
'''
A helper function that gets a list of group objects for all groups on the
machine
Returns:
iter: A list of objects for all groups on the machine
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
results = nt.GetObject('', 'WinNT://.')
results.Filter = ['group']
return results
def _get_username(member):
'''
Resolve the username from the member object returned from a group query
Returns:
str: The username converted to domain\\username format
'''
return member.ADSPath.replace('WinNT://', '').replace(
'/', '\\')
def add(name, **kwargs):
'''
Add the specified group
Args:
name (str):
The name of the group to add
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.add foo
'''
if not info(name):
comp_obj = _get_computer_object()
try:
new_group = comp_obj.Create('group', name)
new_group.SetInfo()
log.info('Successfully created group %s', name)
except pywintypes.com_error as exc:
msg = 'Failed to create group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
else:
log.warning('The group %s already exists.', name)
return False
return True
def delete(name, **kwargs):
'''
Remove the named group
Args:
name (str):
The name of the group to remove
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.delete foo
'''
if info(name):
comp_obj = _get_computer_object()
try:
comp_obj.Delete('group', name)
log.info('Successfully removed group %s', name)
except pywintypes.com_error as exc:
msg = 'Failed to remove group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
else:
log.warning('The group %s does not exists.', name)
return False
return True
def info(name):
'''
Return information about a group
Args:
name (str):
The name of the group for which to get information
Returns:
dict: A dictionary of information about the group
CLI Example:
.. code-block:: bash
salt '*' group.info foo
'''
try:
groupObj = _get_group_object(name)
gr_name = groupObj.Name
gr_mem = [_get_username(x) for x in groupObj.members()]
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.debug(msg)
return False
if not gr_name:
return False
return {'name': gr_name,
'passwd': None,
'gid': None,
'members': gr_mem}
def getent(refresh=False):
'''
Return info on all groups
Args:
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
the groups in ``__context__`` will be returned. If True the
``__context__`` will be refreshed with current data and returned.
Default is False
Returns:
A list of groups and their information
CLI Example:
.. code-block:: bash
salt '*' group.getent
'''
if 'group.getent' in __context__ and not refresh:
return __context__['group.getent']
ret = []
results = _get_all_groups()
for result in results:
group = {'gid': __salt__['file.group_to_gid'](result.Name),
'members': [_get_username(x) for x in result.members()],
'name': result.Name,
'passwd': 'x'}
ret.append(group)
__context__['group.getent'] = ret
return ret
def adduser(name, username, **kwargs):
'''
Add a user to a group
Args:
name (str):
The name of the group to modify
username (str):
The name of the user to add to the group
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.adduser foo username
'''
try:
group_obj = _get_group_object(name)
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in group_obj.members()]
username = salt.utils.win_functions.get_sam_name(username)
try:
if username not in existing_members:
group_obj.Add('WinNT://' + username.replace('\\', '/'))
log.info('Added user %s', username)
else:
log.warning('User %s is already a member of %s', username, name)
return False
except pywintypes.com_error as exc:
msg = 'Failed to add {0} to group {1}. {2}'.format(
username, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
return True
def deluser(name, username, **kwargs):
'''
Remove a user from a group
Args:
name (str):
The name of the group to modify
username (str):
The name of the user to remove from the group
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.deluser foo username
'''
try:
group_obj = _get_group_object(name)
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in group_obj.members()]
try:
if salt.utils.win_functions.get_sam_name(username) in existing_members:
group_obj.Remove('WinNT://' + username.replace('\\', '/'))
log.info('Removed user %s', username)
else:
log.warning('User %s is not a member of %s', username, name)
return False
except pywintypes.com_error as exc:
msg = 'Failed to remove {0} from group {1}. {2}'.format(
username, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
return True
def members(name, members_list, **kwargs):
'''
Ensure a group contains only the members in the list
Args:
name (str):
The name of the group to modify
members_list (str):
A single user or a comma separated list of users. The group will
contain only the users specified in this list.
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.members foo 'user1,user2,user3'
'''
members_list = [salt.utils.win_functions.get_sam_name(m) for m in members_list.split(",")]
if not isinstance(members_list, list):
log.debug('member_list is not a list')
return False
try:
obj_group = _get_group_object(name)
except pywintypes.com_error as exc:
# Group probably doesn't exist, but we'll log the error
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in obj_group.members()]
existing_members.sort()
members_list.sort()
if existing_members == members_list:
log.info('%s membership is correct', name)
return True
# add users
success = True
for member in members_list:
if member not in existing_members:
try:
obj_group.Add('WinNT://' + member.replace('\\', '/'))
log.info('User added: %s', member)
except pywintypes.com_error as exc:
msg = 'Failed to add {0} to {1}. {2}'.format(
member, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
success = False
# remove users not in members_list
for member in existing_members:
if member not in members_list:
try:
obj_group.Remove('WinNT://' + member.replace('\\', '/'))
log.info('User removed: %s', member)
except pywintypes.com_error as exc:
msg = 'Failed to remove {0} from {1}. {2}'.format(
member, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
success = False
return success
def list_groups(refresh=False):
'''
Return a list of groups
Args:
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
the groups in ``__context__`` will be returned. If True, the
``__context__`` will be refreshed with current data and returned.
Default is False
Returns:
list: A list of groups on the machine
CLI Example:
.. code-block:: bash
salt '*' group.list_groups
'''
if 'group.list_groups' in __context__ and not refresh:
return __context__['group.list_groups']
results = _get_all_groups()
ret = []
for result in results:
ret.append(result.Name)
__context__['group.list_groups'] = ret
return ret
|
saltstack/salt
|
salt/modules/win_groupadd.py
|
_get_all_groups
|
python
|
def _get_all_groups():
'''
A helper function that gets a list of group objects for all groups on the
machine
Returns:
iter: A list of objects for all groups on the machine
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
results = nt.GetObject('', 'WinNT://.')
results.Filter = ['group']
return results
|
A helper function that gets a list of group objects for all groups on the
machine
Returns:
iter: A list of objects for all groups on the machine
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_groupadd.py#L70-L82
| null |
# -*- coding: utf-8 -*-
'''
Manage groups on Windows
.. important::
If you feel that Salt should be using this module to manage groups on a
minion, and it is using a different module (or gives an error similar to
*'group.info' is not available*), see :ref:`here
<module-provider-override>`.
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import Salt libs
import salt.utils.platform
import salt.utils.win_functions
import salt.utils.winapi
try:
import win32api
import win32com.client
import pywintypes
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'group'
def __virtual__():
'''
Set the group module if the kernel is Windows
'''
if salt.utils.platform.is_windows() and HAS_DEPENDENCIES:
return __virtualname__
return (False, "Module win_groupadd: module only works on Windows systems")
def _get_computer_object():
'''
A helper function to get the object for the local machine
Returns:
object: Returns the computer object for the local machine
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
return nt.GetObject('', 'WinNT://.,computer')
def _get_group_object(name):
'''
A helper function to get a specified group object
Args:
name (str): The name of the object
Returns:
object: The specified group object
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
return nt.GetObject('', 'WinNT://./' + name + ',group')
def _get_username(member):
'''
Resolve the username from the member object returned from a group query
Returns:
str: The username converted to domain\\username format
'''
return member.ADSPath.replace('WinNT://', '').replace(
'/', '\\')
def add(name, **kwargs):
'''
Add the specified group
Args:
name (str):
The name of the group to add
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.add foo
'''
if not info(name):
comp_obj = _get_computer_object()
try:
new_group = comp_obj.Create('group', name)
new_group.SetInfo()
log.info('Successfully created group %s', name)
except pywintypes.com_error as exc:
msg = 'Failed to create group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
else:
log.warning('The group %s already exists.', name)
return False
return True
def delete(name, **kwargs):
'''
Remove the named group
Args:
name (str):
The name of the group to remove
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.delete foo
'''
if info(name):
comp_obj = _get_computer_object()
try:
comp_obj.Delete('group', name)
log.info('Successfully removed group %s', name)
except pywintypes.com_error as exc:
msg = 'Failed to remove group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
else:
log.warning('The group %s does not exists.', name)
return False
return True
def info(name):
'''
Return information about a group
Args:
name (str):
The name of the group for which to get information
Returns:
dict: A dictionary of information about the group
CLI Example:
.. code-block:: bash
salt '*' group.info foo
'''
try:
groupObj = _get_group_object(name)
gr_name = groupObj.Name
gr_mem = [_get_username(x) for x in groupObj.members()]
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.debug(msg)
return False
if not gr_name:
return False
return {'name': gr_name,
'passwd': None,
'gid': None,
'members': gr_mem}
def getent(refresh=False):
'''
Return info on all groups
Args:
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
the groups in ``__context__`` will be returned. If True the
``__context__`` will be refreshed with current data and returned.
Default is False
Returns:
A list of groups and their information
CLI Example:
.. code-block:: bash
salt '*' group.getent
'''
if 'group.getent' in __context__ and not refresh:
return __context__['group.getent']
ret = []
results = _get_all_groups()
for result in results:
group = {'gid': __salt__['file.group_to_gid'](result.Name),
'members': [_get_username(x) for x in result.members()],
'name': result.Name,
'passwd': 'x'}
ret.append(group)
__context__['group.getent'] = ret
return ret
def adduser(name, username, **kwargs):
'''
Add a user to a group
Args:
name (str):
The name of the group to modify
username (str):
The name of the user to add to the group
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.adduser foo username
'''
try:
group_obj = _get_group_object(name)
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in group_obj.members()]
username = salt.utils.win_functions.get_sam_name(username)
try:
if username not in existing_members:
group_obj.Add('WinNT://' + username.replace('\\', '/'))
log.info('Added user %s', username)
else:
log.warning('User %s is already a member of %s', username, name)
return False
except pywintypes.com_error as exc:
msg = 'Failed to add {0} to group {1}. {2}'.format(
username, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
return True
def deluser(name, username, **kwargs):
'''
Remove a user from a group
Args:
name (str):
The name of the group to modify
username (str):
The name of the user to remove from the group
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.deluser foo username
'''
try:
group_obj = _get_group_object(name)
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in group_obj.members()]
try:
if salt.utils.win_functions.get_sam_name(username) in existing_members:
group_obj.Remove('WinNT://' + username.replace('\\', '/'))
log.info('Removed user %s', username)
else:
log.warning('User %s is not a member of %s', username, name)
return False
except pywintypes.com_error as exc:
msg = 'Failed to remove {0} from group {1}. {2}'.format(
username, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
return True
def members(name, members_list, **kwargs):
'''
Ensure a group contains only the members in the list
Args:
name (str):
The name of the group to modify
members_list (str):
A single user or a comma separated list of users. The group will
contain only the users specified in this list.
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.members foo 'user1,user2,user3'
'''
members_list = [salt.utils.win_functions.get_sam_name(m) for m in members_list.split(",")]
if not isinstance(members_list, list):
log.debug('member_list is not a list')
return False
try:
obj_group = _get_group_object(name)
except pywintypes.com_error as exc:
# Group probably doesn't exist, but we'll log the error
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in obj_group.members()]
existing_members.sort()
members_list.sort()
if existing_members == members_list:
log.info('%s membership is correct', name)
return True
# add users
success = True
for member in members_list:
if member not in existing_members:
try:
obj_group.Add('WinNT://' + member.replace('\\', '/'))
log.info('User added: %s', member)
except pywintypes.com_error as exc:
msg = 'Failed to add {0} to {1}. {2}'.format(
member, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
success = False
# remove users not in members_list
for member in existing_members:
if member not in members_list:
try:
obj_group.Remove('WinNT://' + member.replace('\\', '/'))
log.info('User removed: %s', member)
except pywintypes.com_error as exc:
msg = 'Failed to remove {0} from {1}. {2}'.format(
member, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
success = False
return success
def list_groups(refresh=False):
'''
Return a list of groups
Args:
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
the groups in ``__context__`` will be returned. If True, the
``__context__`` will be refreshed with current data and returned.
Default is False
Returns:
list: A list of groups on the machine
CLI Example:
.. code-block:: bash
salt '*' group.list_groups
'''
if 'group.list_groups' in __context__ and not refresh:
return __context__['group.list_groups']
results = _get_all_groups()
ret = []
for result in results:
ret.append(result.Name)
__context__['group.list_groups'] = ret
return ret
|
saltstack/salt
|
salt/modules/win_groupadd.py
|
add
|
python
|
def add(name, **kwargs):
'''
Add the specified group
Args:
name (str):
The name of the group to add
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.add foo
'''
if not info(name):
comp_obj = _get_computer_object()
try:
new_group = comp_obj.Create('group', name)
new_group.SetInfo()
log.info('Successfully created group %s', name)
except pywintypes.com_error as exc:
msg = 'Failed to create group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
else:
log.warning('The group %s already exists.', name)
return False
return True
|
Add the specified group
Args:
name (str):
The name of the group to add
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.add foo
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_groupadd.py#L96-L128
|
[
"def info(name):\n '''\n Return information about a group\n\n Args:\n\n name (str):\n The name of the group for which to get information\n\n Returns:\n dict: A dictionary of information about the group\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' group.info foo\n '''\n try:\n groupObj = _get_group_object(name)\n gr_name = groupObj.Name\n gr_mem = [_get_username(x) for x in groupObj.members()]\n except pywintypes.com_error as exc:\n msg = 'Failed to access group {0}. {1}'.format(\n name, win32api.FormatMessage(exc.excepinfo[5]))\n log.debug(msg)\n return False\n\n if not gr_name:\n return False\n\n return {'name': gr_name,\n 'passwd': None,\n 'gid': None,\n 'members': gr_mem}\n",
"def _get_computer_object():\n '''\n A helper function to get the object for the local machine\n\n Returns:\n object: Returns the computer object for the local machine\n '''\n with salt.utils.winapi.Com():\n nt = win32com.client.Dispatch('AdsNameSpaces')\n return nt.GetObject('', 'WinNT://.,computer')\n"
] |
# -*- coding: utf-8 -*-
'''
Manage groups on Windows
.. important::
If you feel that Salt should be using this module to manage groups on a
minion, and it is using a different module (or gives an error similar to
*'group.info' is not available*), see :ref:`here
<module-provider-override>`.
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import Salt libs
import salt.utils.platform
import salt.utils.win_functions
import salt.utils.winapi
try:
import win32api
import win32com.client
import pywintypes
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'group'
def __virtual__():
    '''
    Only load this module on Windows hosts where the pywin32 dependencies
    imported at module scope are available.
    '''
    if not salt.utils.platform.is_windows() or not HAS_DEPENDENCIES:
        return (False, "Module win_groupadd: module only works on Windows systems")
    return __virtualname__
def _get_computer_object():
    '''
    Fetch the ADSI object representing the local computer.

    Returns:
        object: Returns the computer object for the local machine
    '''
    with salt.utils.winapi.Com():
        dispatch = win32com.client.Dispatch('AdsNameSpaces')
        return dispatch.GetObject('', 'WinNT://.,computer')
def _get_group_object(name):
    '''
    Fetch the ADSI object for a single local group.

    Args:

        name (str): The name of the object

    Returns:
        object: The specified group object
    '''
    with salt.utils.winapi.Com():
        dispatch = win32com.client.Dispatch('AdsNameSpaces')
        # ADSI WinNT provider path for a local group
        group_path = 'WinNT://./' + name + ',group'
        return dispatch.GetObject('', group_path)
def _get_all_groups():
    '''
    Enumerate the ADSI objects for every group defined on this machine.

    Returns:
        iter: A list of objects for all groups on the machine
    '''
    with salt.utils.winapi.Com():
        dispatch = win32com.client.Dispatch('AdsNameSpaces')
        machine = dispatch.GetObject('', 'WinNT://.')
        # Restrict the container enumeration to group objects only
        machine.Filter = ['group']
        return machine
def _get_username(member):
'''
Resolve the username from the member object returned from a group query
Returns:
str: The username converted to domain\\username format
'''
return member.ADSPath.replace('WinNT://', '').replace(
'/', '\\')
def delete(name, **kwargs):
    '''
    Remove the named group

    Args:

        name (str):
            The name of the group to remove

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' group.delete foo
    '''
    if info(name):
        comp_obj = _get_computer_object()
        try:
            comp_obj.Delete('group', name)
            log.info('Successfully removed group %s', name)
        except pywintypes.com_error as exc:
            msg = 'Failed to remove group {0}. {1}'.format(
                name, win32api.FormatMessage(exc.excepinfo[5]))
            log.error(msg)
            return False
    else:
        # Fixed log message grammar: was 'does not exists.'
        log.warning('The group %s does not exist.', name)
        return False
    return True
def info(name):
    '''
    Return information about a group

    Args:

        name (str):
            The name of the group for which to get information

    Returns:
        dict: A dictionary of information about the group

    CLI Example:

    .. code-block:: bash

        salt '*' group.info foo
    '''
    try:
        group_obj = _get_group_object(name)
        group_name = group_obj.Name
        group_members = [_get_username(member) for member in group_obj.members()]
    except pywintypes.com_error as exc:
        msg = 'Failed to access group {0}. {1}'.format(
            name, win32api.FormatMessage(exc.excepinfo[5]))
        log.debug(msg)
        return False
    # An empty name means the lookup did not yield a usable group
    if not group_name:
        return False
    return {'name': group_name,
            'passwd': None,
            'gid': None,
            'members': group_members}
def getent(refresh=False):
    '''
    Return info on all groups

    Args:

        refresh (bool):
            Refresh the info for all groups in ``__context__``. If False only
            the groups in ``__context__`` will be returned. If True the
            ``__context__`` will be refreshed with current data and returned.
            Default is False

    Returns:
        A list of groups and their information

    CLI Example:

    .. code-block:: bash

        salt '*' group.getent
    '''
    # Serve the cached answer unless a refresh was requested
    if 'group.getent' in __context__ and not refresh:
        return __context__['group.getent']
    ret = []
    for group in _get_all_groups():
        ret.append({
            'gid': __salt__['file.group_to_gid'](group.Name),
            'members': [_get_username(member) for member in group.members()],
            'name': group.Name,
            'passwd': 'x',
        })
    __context__['group.getent'] = ret
    return ret
def adduser(name, username, **kwargs):
    '''
    Add a user to a group

    Args:

        name (str):
            The name of the group to modify

        username (str):
            The name of the user to add to the group

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' group.adduser foo username
    '''
    try:
        group_obj = _get_group_object(name)
    except pywintypes.com_error as exc:
        msg = 'Failed to access group {0}. {1}'.format(
            name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False
    # Normalize to DOMAIN\\username so the membership comparison works
    username = salt.utils.win_functions.get_sam_name(username)
    current_members = [_get_username(member) for member in group_obj.members()]
    if username in current_members:
        log.warning('User %s is already a member of %s', username, name)
        return False
    try:
        group_obj.Add('WinNT://' + username.replace('\\', '/'))
        log.info('Added user %s', username)
    except pywintypes.com_error as exc:
        msg = 'Failed to add {0} to group {1}. {2}'.format(
            username, name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False
    return True
def deluser(name, username, **kwargs):
    '''
    Remove a user from a group

    Args:

        name (str):
            The name of the group to modify

        username (str):
            The name of the user to remove from the group

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' group.deluser foo username
    '''
    try:
        group_obj = _get_group_object(name)
    except pywintypes.com_error as exc:
        msg = 'Failed to access group {0}. {1}'.format(
            name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False
    existing_members = [_get_username(x) for x in group_obj.members()]
    # Normalize once and reuse, mirroring adduser. The previous code compared
    # the normalized name but passed the raw username to Remove, so removal
    # could target a differently-qualified path than the one that matched.
    username = salt.utils.win_functions.get_sam_name(username)
    try:
        if username in existing_members:
            group_obj.Remove('WinNT://' + username.replace('\\', '/'))
            log.info('Removed user %s', username)
        else:
            log.warning('User %s is not a member of %s', username, name)
            return False
    except pywintypes.com_error as exc:
        msg = 'Failed to remove {0} from group {1}. {2}'.format(
            username, name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False
    return True
def members(name, members_list, **kwargs):
    '''
    Ensure a group contains only the members in the list

    Args:

        name (str):
            The name of the group to modify

        members_list (str):
            A single user or a comma separated list of users. The group will
            contain only the users specified in this list.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' group.members foo 'user1,user2,user3'
    '''
    # Accept either a comma-delimited string or an actual list. The previous
    # isinstance check ran *after* the split(), so it was dead code and a real
    # list argument raised AttributeError instead of returning False.
    if not isinstance(members_list, list):
        try:
            members_list = members_list.split(',')
        except AttributeError:
            log.debug('member_list is not a list')
            return False
    # Normalize every entry to DOMAIN\\username form for comparison
    members_list = [salt.utils.win_functions.get_sam_name(m) for m in members_list]
    try:
        obj_group = _get_group_object(name)
    except pywintypes.com_error as exc:
        # Group probably doesn't exist, but we'll log the error
        msg = 'Failed to access group {0}. {1}'.format(
            name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False
    existing_members = [_get_username(x) for x in obj_group.members()]
    existing_members.sort()
    members_list.sort()
    if existing_members == members_list:
        log.info('%s membership is correct', name)
        return True
    # add users missing from the group
    success = True
    for member in members_list:
        if member not in existing_members:
            try:
                obj_group.Add('WinNT://' + member.replace('\\', '/'))
                log.info('User added: %s', member)
            except pywintypes.com_error as exc:
                msg = 'Failed to add {0} to {1}. {2}'.format(
                    member, name, win32api.FormatMessage(exc.excepinfo[5]))
                log.error(msg)
                success = False
    # remove users not in members_list
    for member in existing_members:
        if member not in members_list:
            try:
                obj_group.Remove('WinNT://' + member.replace('\\', '/'))
                log.info('User removed: %s', member)
            except pywintypes.com_error as exc:
                msg = 'Failed to remove {0} from {1}. {2}'.format(
                    member, name, win32api.FormatMessage(exc.excepinfo[5]))
                log.error(msg)
                success = False
    return success
def list_groups(refresh=False):
    '''
    Return a list of groups

    Args:

        refresh (bool):
            Refresh the info for all groups in ``__context__``. If False only
            the groups in ``__context__`` will be returned. If True, the
            ``__context__`` will be refreshed with current data and returned.
            Default is False

    Returns:
        list: A list of groups on the machine

    CLI Example:

    .. code-block:: bash

        salt '*' group.list_groups
    '''
    # Serve the cached answer unless a refresh was requested
    if 'group.list_groups' in __context__ and not refresh:
        return __context__['group.list_groups']
    ret = [group.Name for group in _get_all_groups()]
    __context__['group.list_groups'] = ret
    return ret
|
saltstack/salt
|
salt/modules/win_groupadd.py
|
delete
|
python
|
def delete(name, **kwargs):
'''
Remove the named group
Args:
name (str):
The name of the group to remove
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.delete foo
'''
if info(name):
comp_obj = _get_computer_object()
try:
comp_obj.Delete('group', name)
log.info('Successfully removed group %s', name)
except pywintypes.com_error as exc:
msg = 'Failed to remove group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
else:
log.warning('The group %s does not exists.', name)
return False
return True
|
Remove the named group
Args:
name (str):
The name of the group to remove
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.delete foo
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_groupadd.py#L131-L163
|
[
"def info(name):\n '''\n Return information about a group\n\n Args:\n\n name (str):\n The name of the group for which to get information\n\n Returns:\n dict: A dictionary of information about the group\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' group.info foo\n '''\n try:\n groupObj = _get_group_object(name)\n gr_name = groupObj.Name\n gr_mem = [_get_username(x) for x in groupObj.members()]\n except pywintypes.com_error as exc:\n msg = 'Failed to access group {0}. {1}'.format(\n name, win32api.FormatMessage(exc.excepinfo[5]))\n log.debug(msg)\n return False\n\n if not gr_name:\n return False\n\n return {'name': gr_name,\n 'passwd': None,\n 'gid': None,\n 'members': gr_mem}\n",
"def _get_computer_object():\n '''\n A helper function to get the object for the local machine\n\n Returns:\n object: Returns the computer object for the local machine\n '''\n with salt.utils.winapi.Com():\n nt = win32com.client.Dispatch('AdsNameSpaces')\n return nt.GetObject('', 'WinNT://.,computer')\n"
] |
# -*- coding: utf-8 -*-
'''
Manage groups on Windows
.. important::
If you feel that Salt should be using this module to manage groups on a
minion, and it is using a different module (or gives an error similar to
*'group.info' is not available*), see :ref:`here
<module-provider-override>`.
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import Salt libs
import salt.utils.platform
import salt.utils.win_functions
import salt.utils.winapi
try:
import win32api
import win32com.client
import pywintypes
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'group'
def __virtual__():
'''
Set the group module if the kernel is Windows
'''
if salt.utils.platform.is_windows() and HAS_DEPENDENCIES:
return __virtualname__
return (False, "Module win_groupadd: module only works on Windows systems")
def _get_computer_object():
'''
A helper function to get the object for the local machine
Returns:
object: Returns the computer object for the local machine
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
return nt.GetObject('', 'WinNT://.,computer')
def _get_group_object(name):
'''
A helper function to get a specified group object
Args:
name (str): The name of the object
Returns:
object: The specified group object
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
return nt.GetObject('', 'WinNT://./' + name + ',group')
def _get_all_groups():
'''
A helper function that gets a list of group objects for all groups on the
machine
Returns:
iter: A list of objects for all groups on the machine
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
results = nt.GetObject('', 'WinNT://.')
results.Filter = ['group']
return results
def _get_username(member):
'''
Resolve the username from the member object returned from a group query
Returns:
str: The username converted to domain\\username format
'''
return member.ADSPath.replace('WinNT://', '').replace(
'/', '\\')
def add(name, **kwargs):
    '''
    Add the specified group

    Args:

        name (str):
            The name of the group to add

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' group.add foo
    '''
    # Guard clause: nothing to do when the group is already present
    if info(name):
        log.warning('The group %s already exists.', name)
        return False
    comp_obj = _get_computer_object()
    try:
        new_group = comp_obj.Create('group', name)
        new_group.SetInfo()
        log.info('Successfully created group %s', name)
    except pywintypes.com_error as exc:
        msg = 'Failed to create group {0}. {1}'.format(
            name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False
    return True
def info(name):
'''
Return information about a group
Args:
name (str):
The name of the group for which to get information
Returns:
dict: A dictionary of information about the group
CLI Example:
.. code-block:: bash
salt '*' group.info foo
'''
try:
groupObj = _get_group_object(name)
gr_name = groupObj.Name
gr_mem = [_get_username(x) for x in groupObj.members()]
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.debug(msg)
return False
if not gr_name:
return False
return {'name': gr_name,
'passwd': None,
'gid': None,
'members': gr_mem}
def getent(refresh=False):
'''
Return info on all groups
Args:
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
the groups in ``__context__`` will be returned. If True the
``__context__`` will be refreshed with current data and returned.
Default is False
Returns:
A list of groups and their information
CLI Example:
.. code-block:: bash
salt '*' group.getent
'''
if 'group.getent' in __context__ and not refresh:
return __context__['group.getent']
ret = []
results = _get_all_groups()
for result in results:
group = {'gid': __salt__['file.group_to_gid'](result.Name),
'members': [_get_username(x) for x in result.members()],
'name': result.Name,
'passwd': 'x'}
ret.append(group)
__context__['group.getent'] = ret
return ret
def adduser(name, username, **kwargs):
'''
Add a user to a group
Args:
name (str):
The name of the group to modify
username (str):
The name of the user to add to the group
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.adduser foo username
'''
try:
group_obj = _get_group_object(name)
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in group_obj.members()]
username = salt.utils.win_functions.get_sam_name(username)
try:
if username not in existing_members:
group_obj.Add('WinNT://' + username.replace('\\', '/'))
log.info('Added user %s', username)
else:
log.warning('User %s is already a member of %s', username, name)
return False
except pywintypes.com_error as exc:
msg = 'Failed to add {0} to group {1}. {2}'.format(
username, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
return True
def deluser(name, username, **kwargs):
'''
Remove a user from a group
Args:
name (str):
The name of the group to modify
username (str):
The name of the user to remove from the group
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.deluser foo username
'''
try:
group_obj = _get_group_object(name)
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in group_obj.members()]
try:
if salt.utils.win_functions.get_sam_name(username) in existing_members:
group_obj.Remove('WinNT://' + username.replace('\\', '/'))
log.info('Removed user %s', username)
else:
log.warning('User %s is not a member of %s', username, name)
return False
except pywintypes.com_error as exc:
msg = 'Failed to remove {0} from group {1}. {2}'.format(
username, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
return True
def members(name, members_list, **kwargs):
'''
Ensure a group contains only the members in the list
Args:
name (str):
The name of the group to modify
members_list (str):
A single user or a comma separated list of users. The group will
contain only the users specified in this list.
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.members foo 'user1,user2,user3'
'''
members_list = [salt.utils.win_functions.get_sam_name(m) for m in members_list.split(",")]
if not isinstance(members_list, list):
log.debug('member_list is not a list')
return False
try:
obj_group = _get_group_object(name)
except pywintypes.com_error as exc:
# Group probably doesn't exist, but we'll log the error
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in obj_group.members()]
existing_members.sort()
members_list.sort()
if existing_members == members_list:
log.info('%s membership is correct', name)
return True
# add users
success = True
for member in members_list:
if member not in existing_members:
try:
obj_group.Add('WinNT://' + member.replace('\\', '/'))
log.info('User added: %s', member)
except pywintypes.com_error as exc:
msg = 'Failed to add {0} to {1}. {2}'.format(
member, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
success = False
# remove users not in members_list
for member in existing_members:
if member not in members_list:
try:
obj_group.Remove('WinNT://' + member.replace('\\', '/'))
log.info('User removed: %s', member)
except pywintypes.com_error as exc:
msg = 'Failed to remove {0} from {1}. {2}'.format(
member, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
success = False
return success
def list_groups(refresh=False):
'''
Return a list of groups
Args:
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
the groups in ``__context__`` will be returned. If True, the
``__context__`` will be refreshed with current data and returned.
Default is False
Returns:
list: A list of groups on the machine
CLI Example:
.. code-block:: bash
salt '*' group.list_groups
'''
if 'group.list_groups' in __context__ and not refresh:
return __context__['group.list_groups']
results = _get_all_groups()
ret = []
for result in results:
ret.append(result.Name)
__context__['group.list_groups'] = ret
return ret
|
saltstack/salt
|
salt/modules/win_groupadd.py
|
info
|
python
|
def info(name):
'''
Return information about a group
Args:
name (str):
The name of the group for which to get information
Returns:
dict: A dictionary of information about the group
CLI Example:
.. code-block:: bash
salt '*' group.info foo
'''
try:
groupObj = _get_group_object(name)
gr_name = groupObj.Name
gr_mem = [_get_username(x) for x in groupObj.members()]
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.debug(msg)
return False
if not gr_name:
return False
return {'name': gr_name,
'passwd': None,
'gid': None,
'members': gr_mem}
|
Return information about a group
Args:
name (str):
The name of the group for which to get information
Returns:
dict: A dictionary of information about the group
CLI Example:
.. code-block:: bash
salt '*' group.info foo
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_groupadd.py#L166-L200
|
[
"def _get_group_object(name):\n '''\n A helper function to get a specified group object\n\n Args:\n\n name (str): The name of the object\n\n Returns:\n object: The specified group object\n '''\n with salt.utils.winapi.Com():\n nt = win32com.client.Dispatch('AdsNameSpaces')\n return nt.GetObject('', 'WinNT://./' + name + ',group')\n"
] |
# -*- coding: utf-8 -*-
'''
Manage groups on Windows
.. important::
If you feel that Salt should be using this module to manage groups on a
minion, and it is using a different module (or gives an error similar to
*'group.info' is not available*), see :ref:`here
<module-provider-override>`.
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import Salt libs
import salt.utils.platform
import salt.utils.win_functions
import salt.utils.winapi
try:
import win32api
import win32com.client
import pywintypes
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'group'
def __virtual__():
'''
Set the group module if the kernel is Windows
'''
if salt.utils.platform.is_windows() and HAS_DEPENDENCIES:
return __virtualname__
return (False, "Module win_groupadd: module only works on Windows systems")
def _get_computer_object():
'''
A helper function to get the object for the local machine
Returns:
object: Returns the computer object for the local machine
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
return nt.GetObject('', 'WinNT://.,computer')
def _get_group_object(name):
'''
A helper function to get a specified group object
Args:
name (str): The name of the object
Returns:
object: The specified group object
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
return nt.GetObject('', 'WinNT://./' + name + ',group')
def _get_all_groups():
'''
A helper function that gets a list of group objects for all groups on the
machine
Returns:
iter: A list of objects for all groups on the machine
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
results = nt.GetObject('', 'WinNT://.')
results.Filter = ['group']
return results
def _get_username(member):
'''
Resolve the username from the member object returned from a group query
Returns:
str: The username converted to domain\\username format
'''
return member.ADSPath.replace('WinNT://', '').replace(
'/', '\\')
def add(name, **kwargs):
'''
Add the specified group
Args:
name (str):
The name of the group to add
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.add foo
'''
if not info(name):
comp_obj = _get_computer_object()
try:
new_group = comp_obj.Create('group', name)
new_group.SetInfo()
log.info('Successfully created group %s', name)
except pywintypes.com_error as exc:
msg = 'Failed to create group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
else:
log.warning('The group %s already exists.', name)
return False
return True
def delete(name, **kwargs):
'''
Remove the named group
Args:
name (str):
The name of the group to remove
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.delete foo
'''
if info(name):
comp_obj = _get_computer_object()
try:
comp_obj.Delete('group', name)
log.info('Successfully removed group %s', name)
except pywintypes.com_error as exc:
msg = 'Failed to remove group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
else:
log.warning('The group %s does not exists.', name)
return False
return True
def getent(refresh=False):
'''
Return info on all groups
Args:
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
the groups in ``__context__`` will be returned. If True the
``__context__`` will be refreshed with current data and returned.
Default is False
Returns:
A list of groups and their information
CLI Example:
.. code-block:: bash
salt '*' group.getent
'''
if 'group.getent' in __context__ and not refresh:
return __context__['group.getent']
ret = []
results = _get_all_groups()
for result in results:
group = {'gid': __salt__['file.group_to_gid'](result.Name),
'members': [_get_username(x) for x in result.members()],
'name': result.Name,
'passwd': 'x'}
ret.append(group)
__context__['group.getent'] = ret
return ret
def adduser(name, username, **kwargs):
'''
Add a user to a group
Args:
name (str):
The name of the group to modify
username (str):
The name of the user to add to the group
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.adduser foo username
'''
try:
group_obj = _get_group_object(name)
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in group_obj.members()]
username = salt.utils.win_functions.get_sam_name(username)
try:
if username not in existing_members:
group_obj.Add('WinNT://' + username.replace('\\', '/'))
log.info('Added user %s', username)
else:
log.warning('User %s is already a member of %s', username, name)
return False
except pywintypes.com_error as exc:
msg = 'Failed to add {0} to group {1}. {2}'.format(
username, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
return True
def deluser(name, username, **kwargs):
'''
Remove a user from a group
Args:
name (str):
The name of the group to modify
username (str):
The name of the user to remove from the group
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.deluser foo username
'''
try:
group_obj = _get_group_object(name)
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in group_obj.members()]
try:
if salt.utils.win_functions.get_sam_name(username) in existing_members:
group_obj.Remove('WinNT://' + username.replace('\\', '/'))
log.info('Removed user %s', username)
else:
log.warning('User %s is not a member of %s', username, name)
return False
except pywintypes.com_error as exc:
msg = 'Failed to remove {0} from group {1}. {2}'.format(
username, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
return True
def members(name, members_list, **kwargs):
'''
Ensure a group contains only the members in the list
Args:
name (str):
The name of the group to modify
members_list (str):
A single user or a comma separated list of users. The group will
contain only the users specified in this list.
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.members foo 'user1,user2,user3'
'''
members_list = [salt.utils.win_functions.get_sam_name(m) for m in members_list.split(",")]
if not isinstance(members_list, list):
log.debug('member_list is not a list')
return False
try:
obj_group = _get_group_object(name)
except pywintypes.com_error as exc:
# Group probably doesn't exist, but we'll log the error
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in obj_group.members()]
existing_members.sort()
members_list.sort()
if existing_members == members_list:
log.info('%s membership is correct', name)
return True
# add users
success = True
for member in members_list:
if member not in existing_members:
try:
obj_group.Add('WinNT://' + member.replace('\\', '/'))
log.info('User added: %s', member)
except pywintypes.com_error as exc:
msg = 'Failed to add {0} to {1}. {2}'.format(
member, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
success = False
# remove users not in members_list
for member in existing_members:
if member not in members_list:
try:
obj_group.Remove('WinNT://' + member.replace('\\', '/'))
log.info('User removed: %s', member)
except pywintypes.com_error as exc:
msg = 'Failed to remove {0} from {1}. {2}'.format(
member, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
success = False
return success
def list_groups(refresh=False):
'''
Return a list of groups
Args:
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
the groups in ``__context__`` will be returned. If True, the
``__context__`` will be refreshed with current data and returned.
Default is False
Returns:
list: A list of groups on the machine
CLI Example:
.. code-block:: bash
salt '*' group.list_groups
'''
if 'group.list_groups' in __context__ and not refresh:
return __context__['group.list_groups']
results = _get_all_groups()
ret = []
for result in results:
ret.append(result.Name)
__context__['group.list_groups'] = ret
return ret
|
saltstack/salt
|
salt/modules/win_groupadd.py
|
getent
|
python
|
def getent(refresh=False):
'''
Return info on all groups
Args:
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
the groups in ``__context__`` will be returned. If True the
``__context__`` will be refreshed with current data and returned.
Default is False
Returns:
A list of groups and their information
CLI Example:
.. code-block:: bash
salt '*' group.getent
'''
if 'group.getent' in __context__ and not refresh:
return __context__['group.getent']
ret = []
results = _get_all_groups()
for result in results:
group = {'gid': __salt__['file.group_to_gid'](result.Name),
'members': [_get_username(x) for x in result.members()],
'name': result.Name,
'passwd': 'x'}
ret.append(group)
__context__['group.getent'] = ret
return ret
|
Return info on all groups
Args:
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
the groups in ``__context__`` will be returned. If True the
``__context__`` will be refreshed with current data and returned.
Default is False
Returns:
A list of groups and their information
CLI Example:
.. code-block:: bash
salt '*' group.getent
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_groupadd.py#L203-L238
|
[
"def _get_all_groups():\n '''\n A helper function that gets a list of group objects for all groups on the\n machine\n\n Returns:\n iter: A list of objects for all groups on the machine\n '''\n with salt.utils.winapi.Com():\n nt = win32com.client.Dispatch('AdsNameSpaces')\n results = nt.GetObject('', 'WinNT://.')\n results.Filter = ['group']\n return results\n"
] |
# -*- coding: utf-8 -*-
'''
Manage groups on Windows
.. important::
If you feel that Salt should be using this module to manage groups on a
minion, and it is using a different module (or gives an error similar to
*'group.info' is not available*), see :ref:`here
<module-provider-override>`.
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import Salt libs
import salt.utils.platform
import salt.utils.win_functions
import salt.utils.winapi
try:
import win32api
import win32com.client
import pywintypes
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'group'
def __virtual__():
    '''
    Load as the ``group`` module, but only on Windows hosts that have the
    required pywin32 dependencies available.
    '''
    if not salt.utils.platform.is_windows():
        return (False, "Module win_groupadd: module only works on Windows systems")
    if not HAS_DEPENDENCIES:
        return (False, "Module win_groupadd: module only works on Windows systems")
    return __virtualname__
def _get_computer_object():
    '''
    Get the ADSI object representing the local machine.

    Returns:
        object: Returns the computer object for the local machine
    '''
    # The WinNT provider path "." addresses the local computer
    with salt.utils.winapi.Com():
        dispatch = win32com.client.Dispatch('AdsNameSpaces')
        return dispatch.GetObject('', 'WinNT://.,computer')
def _get_group_object(name):
    '''
    Get the ADSI object for the named local group.

    Args:

        name (str): The name of the group to look up

    Returns:
        object: The specified group object
    '''
    with salt.utils.winapi.Com():
        dispatch = win32com.client.Dispatch('AdsNameSpaces')
        # "./<name>,group" asks the WinNT provider for a local group
        return dispatch.GetObject('', 'WinNT://./{0},group'.format(name))
def _get_all_groups():
    '''
    Enumerate the group objects for every group on the local machine.

    Returns:
        iter: A list of objects for all groups on the machine
    '''
    with salt.utils.winapi.Com():
        dispatch = win32com.client.Dispatch('AdsNameSpaces')
        groups = dispatch.GetObject('', 'WinNT://.')
        # Restrict the container enumeration to group objects only
        groups.Filter = ['group']
        return groups
def _get_username(member):
'''
Resolve the username from the member object returned from a group query
Returns:
str: The username converted to domain\\username format
'''
return member.ADSPath.replace('WinNT://', '').replace(
'/', '\\')
def add(name, **kwargs):
    '''
    Add the specified group.

    Args:

        name (str):
            The name of the group to add

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' group.add foo
    '''
    # Guard: creating an existing group is treated as a failure
    if info(name):
        log.warning('The group %s already exists.', name)
        return False
    computer = _get_computer_object()
    try:
        new_group = computer.Create('group', name)
        new_group.SetInfo()
        log.info('Successfully created group %s', name)
    except pywintypes.com_error as exc:
        msg = 'Failed to create group {0}. {1}'.format(
            name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False
    return True
def delete(name, **kwargs):
    '''
    Remove the named group.

    Args:

        name (str):
            The name of the group to remove

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' group.delete foo
    '''
    # Guard: deleting a missing group is treated as a failure
    if not info(name):
        # Fixed grammar in the warning ("does not exists" -> "does not exist")
        log.warning('The group %s does not exist.', name)
        return False
    comp_obj = _get_computer_object()
    try:
        comp_obj.Delete('group', name)
        log.info('Successfully removed group %s', name)
    except pywintypes.com_error as exc:
        msg = 'Failed to remove group {0}. {1}'.format(
            name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False
    return True
def info(name):
    '''
    Return information about a group.

    Args:

        name (str):
            The name of the group for which to get information

    Returns:
        dict: A dictionary of information about the group, or ``False`` if the
        group could not be accessed

    CLI Example:

    .. code-block:: bash

        salt '*' group.info foo
    '''
    try:
        group_obj = _get_group_object(name)
        group_name = group_obj.Name
        group_members = [_get_username(member) for member in group_obj.members()]
    except pywintypes.com_error as exc:
        # Group lookup failed; most likely it does not exist
        msg = 'Failed to access group {0}. {1}'.format(
            name, win32api.FormatMessage(exc.excepinfo[5]))
        log.debug(msg)
        return False
    if not group_name:
        return False
    # gid/passwd have no meaning for Windows local groups
    return {'name': group_name,
            'passwd': None,
            'gid': None,
            'members': group_members}
def adduser(name, username, **kwargs):
    '''
    Add a user to a group.

    Args:

        name (str):
            The name of the group to modify

        username (str):
            The name of the user to add to the group

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' group.adduser foo username
    '''
    try:
        group_obj = _get_group_object(name)
    except pywintypes.com_error as exc:
        msg = 'Failed to access group {0}. {1}'.format(
            name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False
    current_members = [_get_username(member) for member in group_obj.members()]
    # Normalize to DOMAIN\username so the membership check matches the
    # format returned by _get_username
    username = salt.utils.win_functions.get_sam_name(username)
    if username in current_members:
        log.warning('User %s is already a member of %s', username, name)
        return False
    try:
        group_obj.Add('WinNT://' + username.replace('\\', '/'))
        log.info('Added user %s', username)
    except pywintypes.com_error as exc:
        msg = 'Failed to add {0} to group {1}. {2}'.format(
            username, name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False
    return True
def deluser(name, username, **kwargs):
    '''
    Remove a user from a group.

    Args:

        name (str):
            The name of the group to modify

        username (str):
            The name of the user to remove from the group

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' group.deluser foo username
    '''
    try:
        group_obj = _get_group_object(name)
    except pywintypes.com_error as exc:
        msg = 'Failed to access group {0}. {1}'.format(
            name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False
    existing_members = [_get_username(x) for x in group_obj.members()]
    # Normalize to DOMAIN\username once and use it for both the membership
    # check and the removal path, mirroring ``adduser``. Previously the raw
    # ``username`` was used for the removal while the converted name was used
    # for the check, so a bare name could target the wrong ADSI path.
    username = salt.utils.win_functions.get_sam_name(username)
    try:
        if username in existing_members:
            group_obj.Remove('WinNT://' + username.replace('\\', '/'))
            log.info('Removed user %s', username)
        else:
            log.warning('User %s is not a member of %s', username, name)
            return False
    except pywintypes.com_error as exc:
        msg = 'Failed to remove {0} from group {1}. {2}'.format(
            username, name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False
    return True
def members(name, members_list, **kwargs):
    '''
    Ensure a group contains only the members in the list.

    Args:

        name (str):
            The name of the group to modify

        members_list (str, list):
            A single user, a comma separated list of users, or a Python list
            of users. The group will contain only the users specified.

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' group.members foo 'user1,user2,user3'
    '''
    # Accept either a comma-delimited string or an actual list. The previous
    # implementation called ``.split(',')`` unconditionally, which raised an
    # AttributeError when a list was passed (e.g. from a state), and its
    # ``isinstance`` guard ran only after the conversion and was dead code.
    if not isinstance(members_list, list):
        try:
            members_list = members_list.split(',')
        except AttributeError:
            log.debug('member_list is not a list')
            return False
    members_list = [salt.utils.win_functions.get_sam_name(m) for m in members_list]
    try:
        obj_group = _get_group_object(name)
    except pywintypes.com_error as exc:
        # Group probably doesn't exist, but we'll log the error
        msg = 'Failed to access group {0}. {1}'.format(
            name, win32api.FormatMessage(exc.excepinfo[5]))
        log.error(msg)
        return False
    existing_members = [_get_username(x) for x in obj_group.members()]
    existing_members.sort()
    members_list.sort()
    if existing_members == members_list:
        log.info('%s membership is correct', name)
        return True
    success = True
    # Add requested members that are missing
    for member in members_list:
        if member not in existing_members:
            try:
                obj_group.Add('WinNT://' + member.replace('\\', '/'))
                log.info('User added: %s', member)
            except pywintypes.com_error as exc:
                msg = 'Failed to add {0} to {1}. {2}'.format(
                    member, name, win32api.FormatMessage(exc.excepinfo[5]))
                log.error(msg)
                success = False
    # Remove current members that were not requested
    for member in existing_members:
        if member not in members_list:
            try:
                obj_group.Remove('WinNT://' + member.replace('\\', '/'))
                log.info('User removed: %s', member)
            except pywintypes.com_error as exc:
                msg = 'Failed to remove {0} from {1}. {2}'.format(
                    member, name, win32api.FormatMessage(exc.excepinfo[5]))
                log.error(msg)
                success = False
    return success
def list_groups(refresh=False):
'''
Return a list of groups
Args:
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
the groups in ``__context__`` will be returned. If True, the
``__context__`` will be refreshed with current data and returned.
Default is False
Returns:
list: A list of groups on the machine
CLI Example:
.. code-block:: bash
salt '*' group.list_groups
'''
if 'group.list_groups' in __context__ and not refresh:
return __context__['group.list_groups']
results = _get_all_groups()
ret = []
for result in results:
ret.append(result.Name)
__context__['group.list_groups'] = ret
return ret
|
saltstack/salt
|
salt/modules/win_groupadd.py
|
adduser
|
python
|
def adduser(name, username, **kwargs):
'''
Add a user to a group
Args:
name (str):
The name of the group to modify
username (str):
The name of the user to add to the group
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.adduser foo username
'''
try:
group_obj = _get_group_object(name)
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in group_obj.members()]
username = salt.utils.win_functions.get_sam_name(username)
try:
if username not in existing_members:
group_obj.Add('WinNT://' + username.replace('\\', '/'))
log.info('Added user %s', username)
else:
log.warning('User %s is already a member of %s', username, name)
return False
except pywintypes.com_error as exc:
msg = 'Failed to add {0} to group {1}. {2}'.format(
username, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
return True
|
Add a user to a group
Args:
name (str):
The name of the group to modify
username (str):
The name of the user to add to the group
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.adduser foo username
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_groupadd.py#L241-L286
|
[
"def get_sam_name(username):\n r'''\n Gets the SAM name for a user. It basically prefixes a username without a\n backslash with the computer name. If the user does not exist, a SAM\n compatible name will be returned using the local hostname as the domain.\n\n i.e. salt.utils.get_same_name('Administrator') would return 'DOMAIN.COM\\Administrator'\n\n .. note:: Long computer names are truncated to 15 characters\n '''\n try:\n sid_obj = win32security.LookupAccountName(None, username)[0]\n except pywintypes.error:\n return '\\\\'.join([platform.node()[:15].upper(), username])\n username, domain, _ = win32security.LookupAccountSid(None, sid_obj)\n return '\\\\'.join([domain, username])\n",
"def _get_group_object(name):\n '''\n A helper function to get a specified group object\n\n Args:\n\n name (str): The name of the object\n\n Returns:\n object: The specified group object\n '''\n with salt.utils.winapi.Com():\n nt = win32com.client.Dispatch('AdsNameSpaces')\n return nt.GetObject('', 'WinNT://./' + name + ',group')\n"
] |
# -*- coding: utf-8 -*-
'''
Manage groups on Windows
.. important::
If you feel that Salt should be using this module to manage groups on a
minion, and it is using a different module (or gives an error similar to
*'group.info' is not available*), see :ref:`here
<module-provider-override>`.
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import Salt libs
import salt.utils.platform
import salt.utils.win_functions
import salt.utils.winapi
try:
import win32api
import win32com.client
import pywintypes
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'group'
def __virtual__():
'''
Set the group module if the kernel is Windows
'''
if salt.utils.platform.is_windows() and HAS_DEPENDENCIES:
return __virtualname__
return (False, "Module win_groupadd: module only works on Windows systems")
def _get_computer_object():
'''
A helper function to get the object for the local machine
Returns:
object: Returns the computer object for the local machine
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
return nt.GetObject('', 'WinNT://.,computer')
def _get_group_object(name):
'''
A helper function to get a specified group object
Args:
name (str): The name of the object
Returns:
object: The specified group object
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
return nt.GetObject('', 'WinNT://./' + name + ',group')
def _get_all_groups():
'''
A helper function that gets a list of group objects for all groups on the
machine
Returns:
iter: A list of objects for all groups on the machine
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
results = nt.GetObject('', 'WinNT://.')
results.Filter = ['group']
return results
def _get_username(member):
'''
Resolve the username from the member object returned from a group query
Returns:
str: The username converted to domain\\username format
'''
return member.ADSPath.replace('WinNT://', '').replace(
'/', '\\')
def add(name, **kwargs):
'''
Add the specified group
Args:
name (str):
The name of the group to add
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.add foo
'''
if not info(name):
comp_obj = _get_computer_object()
try:
new_group = comp_obj.Create('group', name)
new_group.SetInfo()
log.info('Successfully created group %s', name)
except pywintypes.com_error as exc:
msg = 'Failed to create group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
else:
log.warning('The group %s already exists.', name)
return False
return True
def delete(name, **kwargs):
'''
Remove the named group
Args:
name (str):
The name of the group to remove
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.delete foo
'''
if info(name):
comp_obj = _get_computer_object()
try:
comp_obj.Delete('group', name)
log.info('Successfully removed group %s', name)
except pywintypes.com_error as exc:
msg = 'Failed to remove group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
else:
log.warning('The group %s does not exists.', name)
return False
return True
def info(name):
'''
Return information about a group
Args:
name (str):
The name of the group for which to get information
Returns:
dict: A dictionary of information about the group
CLI Example:
.. code-block:: bash
salt '*' group.info foo
'''
try:
groupObj = _get_group_object(name)
gr_name = groupObj.Name
gr_mem = [_get_username(x) for x in groupObj.members()]
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.debug(msg)
return False
if not gr_name:
return False
return {'name': gr_name,
'passwd': None,
'gid': None,
'members': gr_mem}
def getent(refresh=False):
    '''
    Return info on all groups.

    Args:

        refresh (bool):
            Refresh the info for all groups in ``__context__``. If False only
            the groups in ``__context__`` will be returned. If True the
            ``__context__`` will be refreshed with current data and returned.
            Default is False

    Returns:
        A list of groups and their information

    CLI Example:

    .. code-block:: bash

        salt '*' group.getent
    '''
    cache_key = 'group.getent'
    if cache_key in __context__ and not refresh:
        # Serve the cached listing unless a refresh was requested
        return __context__[cache_key]
    # One info dict per group, shaped like the POSIX getent output
    entries = [
        {'gid': __salt__['file.group_to_gid'](grp.Name),
         'members': [_get_username(member) for member in grp.members()],
         'name': grp.Name,
         'passwd': 'x'}
        for grp in _get_all_groups()
    ]
    __context__[cache_key] = entries
    return entries
def deluser(name, username, **kwargs):
'''
Remove a user from a group
Args:
name (str):
The name of the group to modify
username (str):
The name of the user to remove from the group
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.deluser foo username
'''
try:
group_obj = _get_group_object(name)
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in group_obj.members()]
try:
if salt.utils.win_functions.get_sam_name(username) in existing_members:
group_obj.Remove('WinNT://' + username.replace('\\', '/'))
log.info('Removed user %s', username)
else:
log.warning('User %s is not a member of %s', username, name)
return False
except pywintypes.com_error as exc:
msg = 'Failed to remove {0} from group {1}. {2}'.format(
username, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
return True
def members(name, members_list, **kwargs):
'''
Ensure a group contains only the members in the list
Args:
name (str):
The name of the group to modify
members_list (str):
A single user or a comma separated list of users. The group will
contain only the users specified in this list.
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.members foo 'user1,user2,user3'
'''
members_list = [salt.utils.win_functions.get_sam_name(m) for m in members_list.split(",")]
if not isinstance(members_list, list):
log.debug('member_list is not a list')
return False
try:
obj_group = _get_group_object(name)
except pywintypes.com_error as exc:
# Group probably doesn't exist, but we'll log the error
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in obj_group.members()]
existing_members.sort()
members_list.sort()
if existing_members == members_list:
log.info('%s membership is correct', name)
return True
# add users
success = True
for member in members_list:
if member not in existing_members:
try:
obj_group.Add('WinNT://' + member.replace('\\', '/'))
log.info('User added: %s', member)
except pywintypes.com_error as exc:
msg = 'Failed to add {0} to {1}. {2}'.format(
member, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
success = False
# remove users not in members_list
for member in existing_members:
if member not in members_list:
try:
obj_group.Remove('WinNT://' + member.replace('\\', '/'))
log.info('User removed: %s', member)
except pywintypes.com_error as exc:
msg = 'Failed to remove {0} from {1}. {2}'.format(
member, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
success = False
return success
def list_groups(refresh=False):
'''
Return a list of groups
Args:
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
the groups in ``__context__`` will be returned. If True, the
``__context__`` will be refreshed with current data and returned.
Default is False
Returns:
list: A list of groups on the machine
CLI Example:
.. code-block:: bash
salt '*' group.list_groups
'''
if 'group.list_groups' in __context__ and not refresh:
return __context__['group.list_groups']
results = _get_all_groups()
ret = []
for result in results:
ret.append(result.Name)
__context__['group.list_groups'] = ret
return ret
|
saltstack/salt
|
salt/modules/win_groupadd.py
|
members
|
python
|
def members(name, members_list, **kwargs):
'''
Ensure a group contains only the members in the list
Args:
name (str):
The name of the group to modify
members_list (str):
A single user or a comma separated list of users. The group will
contain only the users specified in this list.
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.members foo 'user1,user2,user3'
'''
members_list = [salt.utils.win_functions.get_sam_name(m) for m in members_list.split(",")]
if not isinstance(members_list, list):
log.debug('member_list is not a list')
return False
try:
obj_group = _get_group_object(name)
except pywintypes.com_error as exc:
# Group probably doesn't exist, but we'll log the error
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in obj_group.members()]
existing_members.sort()
members_list.sort()
if existing_members == members_list:
log.info('%s membership is correct', name)
return True
# add users
success = True
for member in members_list:
if member not in existing_members:
try:
obj_group.Add('WinNT://' + member.replace('\\', '/'))
log.info('User added: %s', member)
except pywintypes.com_error as exc:
msg = 'Failed to add {0} to {1}. {2}'.format(
member, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
success = False
# remove users not in members_list
for member in existing_members:
if member not in members_list:
try:
obj_group.Remove('WinNT://' + member.replace('\\', '/'))
log.info('User removed: %s', member)
except pywintypes.com_error as exc:
msg = 'Failed to remove {0} from {1}. {2}'.format(
member, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
success = False
return success
|
Ensure a group contains only the members in the list
Args:
name (str):
The name of the group to modify
members_list (str):
A single user or a comma separated list of users. The group will
contain only the users specified in this list.
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.members foo 'user1,user2,user3'
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_groupadd.py#L336-L405
|
[
"def _get_group_object(name):\n '''\n A helper function to get a specified group object\n\n Args:\n\n name (str): The name of the object\n\n Returns:\n object: The specified group object\n '''\n with salt.utils.winapi.Com():\n nt = win32com.client.Dispatch('AdsNameSpaces')\n return nt.GetObject('', 'WinNT://./' + name + ',group')\n"
] |
# -*- coding: utf-8 -*-
'''
Manage groups on Windows
.. important::
If you feel that Salt should be using this module to manage groups on a
minion, and it is using a different module (or gives an error similar to
*'group.info' is not available*), see :ref:`here
<module-provider-override>`.
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import Salt libs
import salt.utils.platform
import salt.utils.win_functions
import salt.utils.winapi
try:
import win32api
import win32com.client
import pywintypes
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'group'
def __virtual__():
'''
Set the group module if the kernel is Windows
'''
if salt.utils.platform.is_windows() and HAS_DEPENDENCIES:
return __virtualname__
return (False, "Module win_groupadd: module only works on Windows systems")
def _get_computer_object():
'''
A helper function to get the object for the local machine
Returns:
object: Returns the computer object for the local machine
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
return nt.GetObject('', 'WinNT://.,computer')
def _get_group_object(name):
'''
A helper function to get a specified group object
Args:
name (str): The name of the object
Returns:
object: The specified group object
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
return nt.GetObject('', 'WinNT://./' + name + ',group')
def _get_all_groups():
'''
A helper function that gets a list of group objects for all groups on the
machine
Returns:
iter: A list of objects for all groups on the machine
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
results = nt.GetObject('', 'WinNT://.')
results.Filter = ['group']
return results
def _get_username(member):
'''
Resolve the username from the member object returned from a group query
Returns:
str: The username converted to domain\\username format
'''
return member.ADSPath.replace('WinNT://', '').replace(
'/', '\\')
def add(name, **kwargs):
'''
Add the specified group
Args:
name (str):
The name of the group to add
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.add foo
'''
if not info(name):
comp_obj = _get_computer_object()
try:
new_group = comp_obj.Create('group', name)
new_group.SetInfo()
log.info('Successfully created group %s', name)
except pywintypes.com_error as exc:
msg = 'Failed to create group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
else:
log.warning('The group %s already exists.', name)
return False
return True
def delete(name, **kwargs):
'''
Remove the named group
Args:
name (str):
The name of the group to remove
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.delete foo
'''
if info(name):
comp_obj = _get_computer_object()
try:
comp_obj.Delete('group', name)
log.info('Successfully removed group %s', name)
except pywintypes.com_error as exc:
msg = 'Failed to remove group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
else:
log.warning('The group %s does not exists.', name)
return False
return True
def info(name):
'''
Return information about a group
Args:
name (str):
The name of the group for which to get information
Returns:
dict: A dictionary of information about the group
CLI Example:
.. code-block:: bash
salt '*' group.info foo
'''
try:
groupObj = _get_group_object(name)
gr_name = groupObj.Name
gr_mem = [_get_username(x) for x in groupObj.members()]
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.debug(msg)
return False
if not gr_name:
return False
return {'name': gr_name,
'passwd': None,
'gid': None,
'members': gr_mem}
def getent(refresh=False):
'''
Return info on all groups
Args:
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
the groups in ``__context__`` will be returned. If True the
``__context__`` will be refreshed with current data and returned.
Default is False
Returns:
A list of groups and their information
CLI Example:
.. code-block:: bash
salt '*' group.getent
'''
if 'group.getent' in __context__ and not refresh:
return __context__['group.getent']
ret = []
results = _get_all_groups()
for result in results:
group = {'gid': __salt__['file.group_to_gid'](result.Name),
'members': [_get_username(x) for x in result.members()],
'name': result.Name,
'passwd': 'x'}
ret.append(group)
__context__['group.getent'] = ret
return ret
def adduser(name, username, **kwargs):
'''
Add a user to a group
Args:
name (str):
The name of the group to modify
username (str):
The name of the user to add to the group
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.adduser foo username
'''
try:
group_obj = _get_group_object(name)
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in group_obj.members()]
username = salt.utils.win_functions.get_sam_name(username)
try:
if username not in existing_members:
group_obj.Add('WinNT://' + username.replace('\\', '/'))
log.info('Added user %s', username)
else:
log.warning('User %s is already a member of %s', username, name)
return False
except pywintypes.com_error as exc:
msg = 'Failed to add {0} to group {1}. {2}'.format(
username, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
return True
def deluser(name, username, **kwargs):
'''
Remove a user from a group
Args:
name (str):
The name of the group to modify
username (str):
The name of the user to remove from the group
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.deluser foo username
'''
try:
group_obj = _get_group_object(name)
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in group_obj.members()]
try:
if salt.utils.win_functions.get_sam_name(username) in existing_members:
group_obj.Remove('WinNT://' + username.replace('\\', '/'))
log.info('Removed user %s', username)
else:
log.warning('User %s is not a member of %s', username, name)
return False
except pywintypes.com_error as exc:
msg = 'Failed to remove {0} from group {1}. {2}'.format(
username, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
return True
def list_groups(refresh=False):
'''
Return a list of groups
Args:
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
the groups in ``__context__`` will be returned. If True, the
``__context__`` will be refreshed with current data and returned.
Default is False
Returns:
list: A list of groups on the machine
CLI Example:
.. code-block:: bash
salt '*' group.list_groups
'''
if 'group.list_groups' in __context__ and not refresh:
return __context__['group.list_groups']
results = _get_all_groups()
ret = []
for result in results:
ret.append(result.Name)
__context__['group.list_groups'] = ret
return ret
|
saltstack/salt
|
salt/modules/win_groupadd.py
|
list_groups
|
python
|
def list_groups(refresh=False):
'''
Return a list of groups
Args:
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
the groups in ``__context__`` will be returned. If True, the
``__context__`` will be refreshed with current data and returned.
Default is False
Returns:
list: A list of groups on the machine
CLI Example:
.. code-block:: bash
salt '*' group.list_groups
'''
if 'group.list_groups' in __context__ and not refresh:
return __context__['group.list_groups']
results = _get_all_groups()
ret = []
for result in results:
ret.append(result.Name)
__context__['group.list_groups'] = ret
return ret
|
Return a list of groups
Args:
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
the groups in ``__context__`` will be returned. If True, the
``__context__`` will be refreshed with current data and returned.
Default is False
Returns:
list: A list of groups on the machine
CLI Example:
.. code-block:: bash
salt '*' group.list_groups
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_groupadd.py#L408-L441
|
[
"def _get_all_groups():\n '''\n A helper function that gets a list of group objects for all groups on the\n machine\n\n Returns:\n iter: A list of objects for all groups on the machine\n '''\n with salt.utils.winapi.Com():\n nt = win32com.client.Dispatch('AdsNameSpaces')\n results = nt.GetObject('', 'WinNT://.')\n results.Filter = ['group']\n return results\n"
] |
# -*- coding: utf-8 -*-
'''
Manage groups on Windows
.. important::
If you feel that Salt should be using this module to manage groups on a
minion, and it is using a different module (or gives an error similar to
*'group.info' is not available*), see :ref:`here
<module-provider-override>`.
'''
from __future__ import absolute_import, unicode_literals, print_function
import logging
# Import Salt libs
import salt.utils.platform
import salt.utils.win_functions
import salt.utils.winapi
try:
import win32api
import win32com.client
import pywintypes
HAS_DEPENDENCIES = True
except ImportError:
HAS_DEPENDENCIES = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'group'
def __virtual__():
'''
Set the group module if the kernel is Windows
'''
if salt.utils.platform.is_windows() and HAS_DEPENDENCIES:
return __virtualname__
return (False, "Module win_groupadd: module only works on Windows systems")
def _get_computer_object():
'''
A helper function to get the object for the local machine
Returns:
object: Returns the computer object for the local machine
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
return nt.GetObject('', 'WinNT://.,computer')
def _get_group_object(name):
'''
A helper function to get a specified group object
Args:
name (str): The name of the object
Returns:
object: The specified group object
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
return nt.GetObject('', 'WinNT://./' + name + ',group')
def _get_all_groups():
'''
A helper function that gets a list of group objects for all groups on the
machine
Returns:
iter: A list of objects for all groups on the machine
'''
with salt.utils.winapi.Com():
nt = win32com.client.Dispatch('AdsNameSpaces')
results = nt.GetObject('', 'WinNT://.')
results.Filter = ['group']
return results
def _get_username(member):
'''
Resolve the username from the member object returned from a group query
Returns:
str: The username converted to domain\\username format
'''
return member.ADSPath.replace('WinNT://', '').replace(
'/', '\\')
def add(name, **kwargs):
'''
Add the specified group
Args:
name (str):
The name of the group to add
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.add foo
'''
if not info(name):
comp_obj = _get_computer_object()
try:
new_group = comp_obj.Create('group', name)
new_group.SetInfo()
log.info('Successfully created group %s', name)
except pywintypes.com_error as exc:
msg = 'Failed to create group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
else:
log.warning('The group %s already exists.', name)
return False
return True
def delete(name, **kwargs):
'''
Remove the named group
Args:
name (str):
The name of the group to remove
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.delete foo
'''
if info(name):
comp_obj = _get_computer_object()
try:
comp_obj.Delete('group', name)
log.info('Successfully removed group %s', name)
except pywintypes.com_error as exc:
msg = 'Failed to remove group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
else:
log.warning('The group %s does not exists.', name)
return False
return True
def info(name):
'''
Return information about a group
Args:
name (str):
The name of the group for which to get information
Returns:
dict: A dictionary of information about the group
CLI Example:
.. code-block:: bash
salt '*' group.info foo
'''
try:
groupObj = _get_group_object(name)
gr_name = groupObj.Name
gr_mem = [_get_username(x) for x in groupObj.members()]
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.debug(msg)
return False
if not gr_name:
return False
return {'name': gr_name,
'passwd': None,
'gid': None,
'members': gr_mem}
def getent(refresh=False):
'''
Return info on all groups
Args:
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
the groups in ``__context__`` will be returned. If True the
``__context__`` will be refreshed with current data and returned.
Default is False
Returns:
A list of groups and their information
CLI Example:
.. code-block:: bash
salt '*' group.getent
'''
if 'group.getent' in __context__ and not refresh:
return __context__['group.getent']
ret = []
results = _get_all_groups()
for result in results:
group = {'gid': __salt__['file.group_to_gid'](result.Name),
'members': [_get_username(x) for x in result.members()],
'name': result.Name,
'passwd': 'x'}
ret.append(group)
__context__['group.getent'] = ret
return ret
def adduser(name, username, **kwargs):
'''
Add a user to a group
Args:
name (str):
The name of the group to modify
username (str):
The name of the user to add to the group
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.adduser foo username
'''
try:
group_obj = _get_group_object(name)
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in group_obj.members()]
username = salt.utils.win_functions.get_sam_name(username)
try:
if username not in existing_members:
group_obj.Add('WinNT://' + username.replace('\\', '/'))
log.info('Added user %s', username)
else:
log.warning('User %s is already a member of %s', username, name)
return False
except pywintypes.com_error as exc:
msg = 'Failed to add {0} to group {1}. {2}'.format(
username, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
return True
def deluser(name, username, **kwargs):
'''
Remove a user from a group
Args:
name (str):
The name of the group to modify
username (str):
The name of the user to remove from the group
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.deluser foo username
'''
try:
group_obj = _get_group_object(name)
except pywintypes.com_error as exc:
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in group_obj.members()]
try:
if salt.utils.win_functions.get_sam_name(username) in existing_members:
group_obj.Remove('WinNT://' + username.replace('\\', '/'))
log.info('Removed user %s', username)
else:
log.warning('User %s is not a member of %s', username, name)
return False
except pywintypes.com_error as exc:
msg = 'Failed to remove {0} from group {1}. {2}'.format(
username, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
return True
def members(name, members_list, **kwargs):
'''
Ensure a group contains only the members in the list
Args:
name (str):
The name of the group to modify
members_list (str):
A single user or a comma separated list of users. The group will
contain only the users specified in this list.
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
salt '*' group.members foo 'user1,user2,user3'
'''
members_list = [salt.utils.win_functions.get_sam_name(m) for m in members_list.split(",")]
if not isinstance(members_list, list):
log.debug('member_list is not a list')
return False
try:
obj_group = _get_group_object(name)
except pywintypes.com_error as exc:
# Group probably doesn't exist, but we'll log the error
msg = 'Failed to access group {0}. {1}'.format(
name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
return False
existing_members = [_get_username(x) for x in obj_group.members()]
existing_members.sort()
members_list.sort()
if existing_members == members_list:
log.info('%s membership is correct', name)
return True
# add users
success = True
for member in members_list:
if member not in existing_members:
try:
obj_group.Add('WinNT://' + member.replace('\\', '/'))
log.info('User added: %s', member)
except pywintypes.com_error as exc:
msg = 'Failed to add {0} to {1}. {2}'.format(
member, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
success = False
# remove users not in members_list
for member in existing_members:
if member not in members_list:
try:
obj_group.Remove('WinNT://' + member.replace('\\', '/'))
log.info('User removed: %s', member)
except pywintypes.com_error as exc:
msg = 'Failed to remove {0} from {1}. {2}'.format(
member, name, win32api.FormatMessage(exc.excepinfo[5]))
log.error(msg)
success = False
return success
|
saltstack/salt
|
salt/utils/decorators/signature.py
|
identical_signature_wrapper
|
python
|
def identical_signature_wrapper(original_function, wrapped_function):
'''
Return a function with identical signature as ``original_function``'s which
will call the ``wrapped_function``.
'''
context = {'__wrapped__': wrapped_function}
function_def = compile(
'def {0}({1}):\n'
' return __wrapped__({2})'.format(
# Keep the original function name
original_function.__name__,
# The function signature including defaults, i.e., 'timeout=1'
inspect.formatargspec(
*salt.utils.args.get_function_argspec(original_function)
)[1:-1],
# The function signature without the defaults
inspect.formatargspec(
formatvalue=lambda val: '',
*salt.utils.args.get_function_argspec(original_function)
)[1:-1]
),
'<string>',
'exec'
)
six.exec_(function_def, context)
return wraps(original_function)(context[original_function.__name__])
|
Return a function with identical signature as ``original_function``'s which
will call the ``wrapped_function``.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/decorators/signature.py#L18-L43
|
[
"def get_function_argspec(func, is_class_method=None):\n '''\n A small wrapper around getargspec that also supports callable classes\n :param is_class_method: Pass True if you are sure that the function being passed\n is a class method. The reason for this is that on Python 3\n ``inspect.ismethod`` only returns ``True`` for bound methods,\n while on Python 2, it returns ``True`` for bound and unbound\n methods. So, on Python 3, in case of a class method, you'd\n need the class to which the function belongs to be instantiated\n and this is not always wanted.\n '''\n if not callable(func):\n raise TypeError('{0} is not a callable'.format(func))\n\n if six.PY2:\n if is_class_method is True:\n aspec = inspect.getargspec(func)\n del aspec.args[0] # self\n elif inspect.isfunction(func):\n aspec = inspect.getargspec(func)\n elif inspect.ismethod(func):\n aspec = inspect.getargspec(func)\n del aspec.args[0] # self\n elif isinstance(func, object):\n aspec = inspect.getargspec(func.__call__)\n del aspec.args[0] # self\n else:\n raise TypeError(\n 'Cannot inspect argument list for \\'{0}\\''.format(func)\n )\n else:\n if is_class_method is True:\n aspec = _getargspec(func)\n del aspec.args[0] # self\n elif inspect.isfunction(func):\n aspec = _getargspec(func) # pylint: disable=redefined-variable-type\n elif inspect.ismethod(func):\n aspec = _getargspec(func)\n del aspec.args[0] # self\n elif isinstance(func, object):\n aspec = _getargspec(func.__call__)\n del aspec.args[0] # self\n else:\n raise TypeError(\n 'Cannot inspect argument list for \\'{0}\\''.format(func)\n )\n return aspec\n",
"def exec_(_code_, _globs_=None, _locs_=None):\n \"\"\"Execute code in a namespace.\"\"\"\n if _globs_ is None:\n frame = sys._getframe(1)\n _globs_ = frame.f_globals\n if _locs_ is None:\n _locs_ = frame.f_locals\n del frame\n elif _locs_ is None:\n _locs_ = _globs_\n exec(\"\"\"exec _code_ in _globs_, _locs_\"\"\")\n"
] |
# -*- coding: utf-8 -*-
'''
A decorator which returns a function with the same signature of the function
which is being wrapped.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import inspect
from functools import wraps
# Import Salt libs
import salt.utils.args
# Import 3rd-party libs
from salt.ext import six
|
saltstack/salt
|
salt/modules/openvswitch.py
|
_stdout_list_split
|
python
|
def _stdout_list_split(retcode, stdout='', splitstring='\n'):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
stdout: Value of stdout filed from response.
splitstring: String used to split the stdout default new line.
Returns:
List or False.
'''
if retcode == 0:
ret = stdout.split(splitstring)
return ret
else:
return False
|
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
stdout: Value of stdout filed from response.
splitstring: String used to split the stdout default new line.
Returns:
List or False.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L80-L96
| null |
# -*- coding: utf-8 -*-
'''
Support for Open vSwitch - module with basic Open vSwitch commands.
Suitable for setting up Openstack Neutron.
:codeauthor: Jiri Kotlin <jiri.kotlin@ultimum.io>
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
from salt.ext import six
from salt.exceptions import ArgumentValueError, CommandExecutionError
from salt.utils import json
import salt.utils.path
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load the module if Open vSwitch is installed
'''
if salt.utils.path.which('ovs-vsctl'):
return 'openvswitch'
return False
def _param_may_exist(may_exist):
'''
Returns --may-exist parameter for Open vSwitch command.
Args:
may_exist: Boolean whether to use this parameter.
Returns:
String '--may-exist ' or empty string.
'''
if may_exist:
return '--may-exist '
else:
return ''
def _param_if_exists(if_exists):
'''
Returns --if-exist parameter for Open vSwitch command.
Args:
if_exists: Boolean whether to use this parameter.
Returns:
String '--if-exist ' or empty string.
'''
if if_exists:
return '--if-exists '
else:
return ''
def _retcode_to_bool(retcode):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
Returns:
True on 0, else False
'''
if retcode == 0:
return True
else:
return False
def _convert_json(obj):
'''
Converts from the JSON output provided by ovs-vsctl into a usable Python
object tree. In particular, sets and maps are converted from lists to
actual sets or maps.
Args:
obj: Object that shall be recursively converted.
Returns:
Converted version of object.
'''
if isinstance(obj, dict):
return {_convert_json(key): _convert_json(val)
for (key, val) in six.iteritems(obj)}
elif isinstance(obj, list) and len(obj) == 2:
first = obj[0]
second = obj[1]
if first == 'set' and isinstance(second, list):
return [_convert_json(elem) for elem in second]
elif first == 'map' and isinstance(second, list):
for elem in second:
if not isinstance(elem, list) or len(elem) != 2:
return obj
return {elem[0]: _convert_json(elem[1]) for elem in second}
else:
return obj
elif isinstance(obj, list):
return [_convert_json(elem) for elem in obj]
else:
return obj
def _stdout_parse_json(stdout):
'''
Parses JSON output from ovs-vsctl and returns the corresponding object
tree.
Args:
stdout: Output that shall be parsed.
Returns:
Object represented by the output.
'''
obj = json.loads(stdout)
return _convert_json(obj)
def bridge_list():
'''
Lists all existing real and fake bridges.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_list
'''
cmd = 'ovs-vsctl list-br'
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def bridge_exists(br):
'''
Tests whether bridge exists as a real or fake bridge.
Returns:
True if Bridge exists, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_exists br0
'''
cmd = 'ovs-vsctl br-exists {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_create(br, may_exist=True, parent=None, vlan=None):
'''
Creates a new bridge.
Args:
br: A string - bridge name
may_exist: Bool, if False - attempting to create a bridge that exists returns False.
parent: String, the name of the parent bridge (if the bridge shall be
created as a fake bridge). If specified, vlan must also be
specified.
vlan: Int, the VLAN ID of the bridge (if the bridge shall be created as
a fake bridge). If specified, parent must also be specified.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_create br0
'''
param_may_exist = _param_may_exist(may_exist)
if parent is not None and vlan is None:
raise ArgumentValueError(
'If parent is specified, vlan must also be specified.')
if vlan is not None and parent is None:
raise ArgumentValueError(
'If vlan is specified, parent must also be specified.')
param_parent = '' if parent is None else ' {0}'.format(parent)
param_vlan = '' if vlan is None else ' {0}'.format(vlan)
cmd = 'ovs-vsctl {1}add-br {0}{2}{3}'.format(br, param_may_exist, param_parent,
param_vlan)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def bridge_delete(br, if_exists=True):
'''
Deletes bridge and all of its ports.
Args:
br: A string - bridge name
if_exists: Bool, if False - attempting to delete a bridge that does not exist returns False.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_delete br0
'''
param_if_exists = _param_if_exists(if_exists)
cmd = 'ovs-vsctl {1}del-br {0}'.format(br, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_to_parent(br):
'''
Returns the parent bridge of a bridge.
Args:
br: A string - bridge name
Returns:
Name of the parent bridge. This is the same as the bridge name if the
bridge is not a fake bridge. If the bridge does not exist, False is
returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-parent {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return result['stdout']
def bridge_to_vlan(br):
'''
Returns the VLAN ID of a bridge.
Args:
br: A string - bridge name
Returns:
VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake
bridge. If the bridge does not exist, False is returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-vlan {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return int(result['stdout'])
def port_add(br, port, may_exist=False, internal=False):
'''
Creates on bridge a new port named port.
Returns:
True on success, else False.
Args:
br: A string - bridge name
port: A string - port name
may_exist: Bool, if False - attempting to create a port that exists returns False.
internal: A boolean to create an internal interface if one does not exist.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_add br0 8080
'''
param_may_exist = _param_may_exist(may_exist)
cmd = 'ovs-vsctl {2}add-port {0} {1}'.format(br, port, param_may_exist)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def port_remove(br, port, if_exists=True):
'''
Deletes port.
Args:
br: A string - bridge name (If bridge is None, port is removed from whatever bridge contains it)
port: A string - port name.
if_exists: Bool, if False - attempting to delete a por that does not exist returns False. (Default True)
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_remove br0 8080
'''
param_if_exists = _param_if_exists(if_exists)
if port and not br:
cmd = 'ovs-vsctl {1}del-port {0}'.format(port, param_if_exists)
else:
cmd = 'ovs-vsctl {2}del-port {0} {1}'.format(br, port, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def port_list(br):
'''
Lists all of the ports within bridge.
Args:
br: A string - bridge name.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_list br0
'''
cmd = 'ovs-vsctl list-ports {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def port_get_tag(port):
'''
Lists tags of the port.
Args:
port: A string - port name.
Returns:
List of tags (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_get_tag tap0
'''
cmd = 'ovs-vsctl get port {0} tag'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def interface_get_options(port):
'''
Port's interface's optional parameters.
Args:
port: A string - port name.
Returns:
String containing optional parameters of port's interface, False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.interface_get_options tap0
'''
cmd = 'ovs-vsctl get interface {0} options'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def interface_get_type(port):
'''
Type of port's interface.
Args:
port: A string - port name.
Returns:
String - type of interface or empty string, False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.interface_get_type tap0
'''
cmd = 'ovs-vsctl get interface {0} type'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def port_create_vlan(br, port, id, internal=False):
'''
Isolate VM traffic using VLANs.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer in the valid range 0 to 4095 (inclusive), name of VLAN.
internal: A boolean to create an internal interface if one does not exist.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_vlan br0 tap0 100
'''
interfaces = __salt__['network.interfaces']()
if not 0 <= id <= 4095:
return False
elif not bridge_exists(br):
return False
elif not internal and port not in interfaces:
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set port {0} tag={1}'.format(port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} tag={2}'.format(br, port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def port_create_gre(br, port, id, remote):
'''
Generic Routing Encapsulation - creates GRE tunnel between endpoints.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer - unsigned 32-bit number, tunnel's key.
remote: A string - remote endpoint's IP address.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
'''
if not 0 <= id < 2**32:
return False
elif not __salt__['dig.check_ip'](remote):
return False
elif not bridge_exists(br):
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set interface {0} type=gre options:remote_ip={1} options:key={2}'.format(port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=gre options:remote_ip={2} ' \
'options:key={3}'.format(br, port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def port_create_vxlan(br, port, id, remote, dst_port=None):
'''
Virtual eXtensible Local Area Network - creates VXLAN tunnel between endpoints.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer - unsigned 64-bit number, tunnel's key.
remote: A string - remote endpoint's IP address.
dst_port: An integer - port to use when creating tunnelport in the switch.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_vxlan br0 vx1 5001 192.168.1.10 8472
'''
dst_port = ' options:dst_port=' + six.text_type(dst_port) if 0 < dst_port <= 65535 else ''
if not 0 <= id < 2**64:
return False
elif not __salt__['dig.check_ip'](remote):
return False
elif not bridge_exists(br):
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set interface {0} type=vxlan options:remote_ip={1} ' \
'options:key={2}{3}'.format(port, remote, id, dst_port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=vxlan options:remote_ip={2} ' \
'options:key={3}{4}'.format(br, port, remote, id, dst_port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def db_get(table, record, column, if_exists=False):
'''
Gets a column's value for a specific record.
Args:
table: A string - name of the database table.
record: A string - identifier of the record.
column: A string - name of the column.
if_exists: A boolean - if True, it is not an error if the record does
not exist.
Returns:
The column's value.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.db_get Port br0 vlan_mode
'''
cmd = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]
if if_exists:
cmd += ['--if-exists']
cmd += ['list', table, record]
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
raise CommandExecutionError(result['stderr'])
output = _stdout_parse_json(result['stdout'])
if output['data'] and output['data'][0]:
return output['data'][0][0]
else:
return None
def db_set(table, record, column, value, if_exists=False):
'''
Sets a column's value for a specific record.
Args:
table: A string - name of the database table.
record: A string - identifier of the record.
column: A string - name of the column.
value: A string - the value to be set
if_exists: A boolean - if True, it is not an error if the record does
not exist.
Returns:
None on success and an error message on failure.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
'''
cmd = ['ovs-vsctl']
if if_exists:
cmd += ['--if-exists']
cmd += ['set', table, record, '{0}={1}'.format(column, json.dumps(value))]
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return result['stderr']
else:
return None
|
saltstack/salt
|
salt/modules/openvswitch.py
|
_convert_json
|
python
|
def _convert_json(obj):
'''
Converts from the JSON output provided by ovs-vsctl into a usable Python
object tree. In particular, sets and maps are converted from lists to
actual sets or maps.
Args:
obj: Object that shall be recursively converted.
Returns:
Converted version of object.
'''
if isinstance(obj, dict):
return {_convert_json(key): _convert_json(val)
for (key, val) in six.iteritems(obj)}
elif isinstance(obj, list) and len(obj) == 2:
first = obj[0]
second = obj[1]
if first == 'set' and isinstance(second, list):
return [_convert_json(elem) for elem in second]
elif first == 'map' and isinstance(second, list):
for elem in second:
if not isinstance(elem, list) or len(elem) != 2:
return obj
return {elem[0]: _convert_json(elem[1]) for elem in second}
else:
return obj
elif isinstance(obj, list):
return [_convert_json(elem) for elem in obj]
else:
return obj
|
Converts from the JSON output provided by ovs-vsctl into a usable Python
object tree. In particular, sets and maps are converted from lists to
actual sets or maps.
Args:
obj: Object that shall be recursively converted.
Returns:
Converted version of object.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L99-L129
|
[
"def iteritems(d, **kw):\n return d.iteritems(**kw)\n"
] |
# -*- coding: utf-8 -*-
'''
Support for Open vSwitch - module with basic Open vSwitch commands.
Suitable for setting up Openstack Neutron.
:codeauthor: Jiri Kotlin <jiri.kotlin@ultimum.io>
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
from salt.ext import six
from salt.exceptions import ArgumentValueError, CommandExecutionError
from salt.utils import json
import salt.utils.path
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load the module if Open vSwitch is installed
'''
if salt.utils.path.which('ovs-vsctl'):
return 'openvswitch'
return False
def _param_may_exist(may_exist):
'''
Returns --may-exist parameter for Open vSwitch command.
Args:
may_exist: Boolean whether to use this parameter.
Returns:
String '--may-exist ' or empty string.
'''
if may_exist:
return '--may-exist '
else:
return ''
def _param_if_exists(if_exists):
'''
Returns --if-exist parameter for Open vSwitch command.
Args:
if_exists: Boolean whether to use this parameter.
Returns:
String '--if-exist ' or empty string.
'''
if if_exists:
return '--if-exists '
else:
return ''
def _retcode_to_bool(retcode):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
Returns:
True on 0, else False
'''
if retcode == 0:
return True
else:
return False
def _stdout_list_split(retcode, stdout='', splitstring='\n'):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
stdout: Value of stdout filed from response.
splitstring: String used to split the stdout default new line.
Returns:
List or False.
'''
if retcode == 0:
ret = stdout.split(splitstring)
return ret
else:
return False
def _stdout_parse_json(stdout):
'''
Parses JSON output from ovs-vsctl and returns the corresponding object
tree.
Args:
stdout: Output that shall be parsed.
Returns:
Object represented by the output.
'''
obj = json.loads(stdout)
return _convert_json(obj)
def bridge_list():
'''
Lists all existing real and fake bridges.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_list
'''
cmd = 'ovs-vsctl list-br'
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def bridge_exists(br):
'''
Tests whether bridge exists as a real or fake bridge.
Returns:
True if Bridge exists, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_exists br0
'''
cmd = 'ovs-vsctl br-exists {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_create(br, may_exist=True, parent=None, vlan=None):
'''
Creates a new bridge.
Args:
br: A string - bridge name
may_exist: Bool, if False - attempting to create a bridge that exists returns False.
parent: String, the name of the parent bridge (if the bridge shall be
created as a fake bridge). If specified, vlan must also be
specified.
vlan: Int, the VLAN ID of the bridge (if the bridge shall be created as
a fake bridge). If specified, parent must also be specified.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_create br0
'''
param_may_exist = _param_may_exist(may_exist)
if parent is not None and vlan is None:
raise ArgumentValueError(
'If parent is specified, vlan must also be specified.')
if vlan is not None and parent is None:
raise ArgumentValueError(
'If vlan is specified, parent must also be specified.')
param_parent = '' if parent is None else ' {0}'.format(parent)
param_vlan = '' if vlan is None else ' {0}'.format(vlan)
cmd = 'ovs-vsctl {1}add-br {0}{2}{3}'.format(br, param_may_exist, param_parent,
param_vlan)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def bridge_delete(br, if_exists=True):
    '''
    Delete a bridge together with all of its ports.

    Args:
        br: A string - bridge name
        if_exists: Bool, if False - attempting to delete a bridge that
            does not exist returns False.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.bridge_delete br0
    '''
    cmd = 'ovs-vsctl {0}del-br {1}'.format(_param_if_exists(if_exists), br)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def bridge_to_parent(br):
    '''
    Return the parent bridge of a bridge.

    Args:
        br: A string - bridge name

    Returns:
        Name of the parent bridge. For a real bridge this is the bridge's
        own name. False if the bridge does not exist.

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.bridge_to_parent br0
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl br-to-parent {0}'.format(br))
    return result['stdout'] if result['retcode'] == 0 else False
def bridge_to_vlan(br):
    '''
    Return the VLAN ID of a bridge.

    Args:
        br: A string - bridge name

    Returns:
        VLAN ID of the bridge (0 for a real, non-fake bridge).
        False if the bridge does not exist.

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.bridge_to_vlan br0
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl br-to-vlan {0}'.format(br))
    if result['retcode'] != 0:
        return False
    # stdout carries a single decimal VLAN id.
    return int(result['stdout'])
def port_add(br, port, may_exist=False, internal=False):
    '''
    Create a new port on a bridge.

    Args:
        br: A string - bridge name
        port: A string - port name
        may_exist: Bool, if False - attempting to create a port that
            already exists returns False.
        internal: A boolean to create an internal interface if one does
            not exist.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_add br0 8080
    '''
    cmd = 'ovs-vsctl {0}add-port {1} {2}'.format(
        _param_may_exist(may_exist), br, port)
    if internal:
        # Chain a second ovs-vsctl operation to mark the interface internal.
        cmd += ' -- set interface {0} type=internal'.format(port)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def port_remove(br, port, if_exists=True):
    '''
    Delete a port.

    Args:
        br: A string - bridge name. If None, the port is removed from
            whichever bridge currently contains it.
        port: A string - port name.
        if_exists: Bool, if False - attempting to delete a port that does
            not exist returns False. (Default True)

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_remove br0 8080
    '''
    prefix = _param_if_exists(if_exists)
    if port and not br:
        # Without a bridge, ovs-vsctl locates the port itself.
        cmd = 'ovs-vsctl {0}del-port {1}'.format(prefix, port)
    else:
        cmd = 'ovs-vsctl {0}del-port {1} {2}'.format(prefix, br, port)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def port_list(br):
    '''
    List all ports attached to a bridge.

    Args:
        br: A string - bridge name.

    Returns:
        List of port names (possibly empty), or False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_list br0
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl list-ports {0}'.format(br))
    return _stdout_list_split(result['retcode'], result['stdout'])
def port_get_tag(port):
    '''
    List the VLAN tag(s) of a port.

    Args:
        port: A string - port name.

    Returns:
        List of tags (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_get_tag tap0
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl get port {0} tag'.format(port))
    return _stdout_list_split(result['retcode'], result['stdout'])
def interface_get_options(port):
    '''
    Fetch the optional parameters of a port's interface.

    Args:
        port: A string - port name.

    Returns:
        String containing the interface's optional parameters,
        False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.interface_get_options tap0
    '''
    result = __salt__['cmd.run_all'](
        'ovs-vsctl get interface {0} options'.format(port))
    return _stdout_list_split(result['retcode'], result['stdout'])
def interface_get_type(port):
    '''
    Fetch the type of a port's interface.

    Args:
        port: A string - port name.

    Returns:
        String - type of interface or empty string, False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.interface_get_type tap0
    '''
    result = __salt__['cmd.run_all'](
        'ovs-vsctl get interface {0} type'.format(port))
    return _stdout_list_split(result['retcode'], result['stdout'])
def port_create_vlan(br, port, id, internal=False):
    '''
    Isolate VM traffic using VLANs.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer in the valid range 0 to 4095 (inclusive), name of
            VLAN. (Parameter name kept for interface compatibility even
            though it shadows the ``id`` builtin.)
        internal: A boolean to create an internal interface if one does
            not exist.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_create_vlan br0 tap0 100
    '''
    interfaces = __salt__['network.interfaces']()
    if not 0 <= id <= 4095:
        return False
    if not bridge_exists(br):
        return False
    if not internal and port not in interfaces:
        return False
    # Tag an existing port in place, otherwise add the port with the tag.
    if port in port_list(br):
        cmd = 'ovs-vsctl set port {0} tag={1}'.format(port, id)
    else:
        cmd = 'ovs-vsctl add-port {0} {1} tag={2}'.format(br, port, id)
    if internal:
        cmd += ' -- set interface {0} type=internal'.format(port)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def port_create_gre(br, port, id, remote):
    '''
    Generic Routing Encapsulation - creates GRE tunnel between endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 32-bit number, tunnel's key. (Parameter
            name kept for interface compatibility even though it shadows
            the ``id`` builtin.)
        remote: A string - remote endpoint's IP address.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
    '''
    if not 0 <= id < 2**32:
        return False
    if not __salt__['dig.check_ip'](remote):
        return False
    if not bridge_exists(br):
        return False
    # Reconfigure the interface if the port exists, otherwise add it.
    if port in port_list(br):
        cmd = 'ovs-vsctl set interface {0} type=gre options:remote_ip={1} options:key={2}'.format(port, remote, id)
    else:
        cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=gre options:remote_ip={2} ' \
              'options:key={3}'.format(br, port, remote, id)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def port_create_vxlan(br, port, id, remote, dst_port=None):
    '''
    Virtual eXtensible Local Area Network - creates VXLAN tunnel between endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 64-bit number, tunnel's key. (Parameter
            name kept for interface compatibility even though it shadows
            the ``id`` builtin.)
        remote: A string - remote endpoint's IP address.
        dst_port: An integer - UDP port to use for the tunnel endpoint in
            the switch; out-of-range or unset values fall back to the
            OVS default.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_create_vxlan br0 vx1 5001 192.168.1.10 8472
    '''
    # Guard the None default explicitly: ``0 < None`` raises TypeError on
    # Python 3, which previously made dst_port effectively mandatory.
    if dst_port is not None and 0 < dst_port <= 65535:
        dst_port = ' options:dst_port=' + six.text_type(dst_port)
    else:
        dst_port = ''
    if not 0 <= id < 2**64:
        return False
    if not __salt__['dig.check_ip'](remote):
        return False
    if not bridge_exists(br):
        return False
    # Reconfigure the interface if the port exists, otherwise add it.
    if port in port_list(br):
        cmd = 'ovs-vsctl set interface {0} type=vxlan options:remote_ip={1} ' \
              'options:key={2}{3}'.format(port, remote, id, dst_port)
    else:
        cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=vxlan options:remote_ip={2} ' \
              'options:key={3}{4}'.format(br, port, remote, id, dst_port)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def db_get(table, record, column, if_exists=False):
    '''
    Get a column's value for a specific record.

    Args:
        table: A string - name of the database table.
        record: A string - identifier of the record.
        column: A string - name of the column.
        if_exists: A boolean - if True, it is not an error if the record
            does not exist.

    Returns:
        The column's value, or None when no data was returned.

    Raises:
        CommandExecutionError: if ovs-vsctl exits non-zero.

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.db_get Port br0 vlan_mode
    '''
    cmd = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]
    if if_exists:
        cmd.append('--if-exists')
    cmd.extend(['list', table, record])
    result = __salt__['cmd.run_all'](cmd)
    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])
    data = _stdout_parse_json(result['stdout'])['data']
    if data and data[0]:
        return data[0][0]
    return None
def db_set(table, record, column, value, if_exists=False):
    '''
    Set a column's value for a specific record.

    Args:
        table: A string - name of the database table.
        record: A string - identifier of the record.
        column: A string - name of the column.
        value: A string - the value to be set.
        if_exists: A boolean - if True, it is not an error if the record
            does not exist.

    Returns:
        None on success and an error message on failure.

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
    '''
    cmd = ['ovs-vsctl']
    if if_exists:
        cmd.append('--if-exists')
    # JSON-encode the value so OVSDB receives a properly quoted literal.
    cmd.extend(['set', table, record,
                '{0}={1}'.format(column, json.dumps(value))])
    result = __salt__['cmd.run_all'](cmd)
    return result['stderr'] if result['retcode'] != 0 else None
|
saltstack/salt
|
salt/modules/openvswitch.py
|
bridge_list
|
python
|
def bridge_list():
'''
Lists all existing real and fake bridges.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_list
'''
cmd = 'ovs-vsctl list-br'
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
|
Lists all existing real and fake bridges.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_list
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L147-L165
|
[
"def _stdout_list_split(retcode, stdout='', splitstring='\\n'):\n '''\n Evaulates Open vSwitch command`s retcode value.\n\n Args:\n retcode: Value of retcode field from response, should be 0, 1 or 2.\n stdout: Value of stdout filed from response.\n splitstring: String used to split the stdout default new line.\n\n Returns:\n List or False.\n '''\n if retcode == 0:\n ret = stdout.split(splitstring)\n return ret\n else:\n return False\n"
] |
# -*- coding: utf-8 -*-
'''
Support for Open vSwitch - module with basic Open vSwitch commands.
Suitable for setting up Openstack Neutron.
:codeauthor: Jiri Kotlin <jiri.kotlin@ultimum.io>
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
from salt.ext import six
from salt.exceptions import ArgumentValueError, CommandExecutionError
from salt.utils import json
import salt.utils.path
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load the module if Open vSwitch is installed.

    Returns the virtual module name ``'openvswitch'`` when the
    ``ovs-vsctl`` binary is found on PATH, otherwise False so the
    loader skips this module.
    '''
    # Presence of the CLI tool is the only capability check performed.
    if salt.utils.path.which('ovs-vsctl'):
        return 'openvswitch'
    return False
def _param_may_exist(may_exist):
'''
Returns --may-exist parameter for Open vSwitch command.
Args:
may_exist: Boolean whether to use this parameter.
Returns:
String '--may-exist ' or empty string.
'''
if may_exist:
return '--may-exist '
else:
return ''
def _param_if_exists(if_exists):
'''
Returns --if-exist parameter for Open vSwitch command.
Args:
if_exists: Boolean whether to use this parameter.
Returns:
String '--if-exist ' or empty string.
'''
if if_exists:
return '--if-exists '
else:
return ''
def _retcode_to_bool(retcode):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
Returns:
True on 0, else False
'''
if retcode == 0:
return True
else:
return False
def _stdout_list_split(retcode, stdout='', splitstring='\n'):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
stdout: Value of stdout filed from response.
splitstring: String used to split the stdout default new line.
Returns:
List or False.
'''
if retcode == 0:
ret = stdout.split(splitstring)
return ret
else:
return False
def _convert_json(obj):
'''
Converts from the JSON output provided by ovs-vsctl into a usable Python
object tree. In particular, sets and maps are converted from lists to
actual sets or maps.
Args:
obj: Object that shall be recursively converted.
Returns:
Converted version of object.
'''
if isinstance(obj, dict):
return {_convert_json(key): _convert_json(val)
for (key, val) in six.iteritems(obj)}
elif isinstance(obj, list) and len(obj) == 2:
first = obj[0]
second = obj[1]
if first == 'set' and isinstance(second, list):
return [_convert_json(elem) for elem in second]
elif first == 'map' and isinstance(second, list):
for elem in second:
if not isinstance(elem, list) or len(elem) != 2:
return obj
return {elem[0]: _convert_json(elem[1]) for elem in second}
else:
return obj
elif isinstance(obj, list):
return [_convert_json(elem) for elem in obj]
else:
return obj
def _stdout_parse_json(stdout):
    '''
    Parse JSON output from ovs-vsctl into a converted Python object tree.

    Args:
        stdout: Output that shall be parsed.

    Returns:
        Object represented by the output, with OVSDB set/map wrappers
        unwrapped by :func:`_convert_json`.
    '''
    return _convert_json(json.loads(stdout))
def bridge_exists(br):
'''
Tests whether bridge exists as a real or fake bridge.
Returns:
True if Bridge exists, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_exists br0
'''
cmd = 'ovs-vsctl br-exists {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_create(br, may_exist=True, parent=None, vlan=None):
'''
Creates a new bridge.
Args:
br: A string - bridge name
may_exist: Bool, if False - attempting to create a bridge that exists returns False.
parent: String, the name of the parent bridge (if the bridge shall be
created as a fake bridge). If specified, vlan must also be
specified.
vlan: Int, the VLAN ID of the bridge (if the bridge shall be created as
a fake bridge). If specified, parent must also be specified.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_create br0
'''
param_may_exist = _param_may_exist(may_exist)
if parent is not None and vlan is None:
raise ArgumentValueError(
'If parent is specified, vlan must also be specified.')
if vlan is not None and parent is None:
raise ArgumentValueError(
'If vlan is specified, parent must also be specified.')
param_parent = '' if parent is None else ' {0}'.format(parent)
param_vlan = '' if vlan is None else ' {0}'.format(vlan)
cmd = 'ovs-vsctl {1}add-br {0}{2}{3}'.format(br, param_may_exist, param_parent,
param_vlan)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def bridge_delete(br, if_exists=True):
'''
Deletes bridge and all of its ports.
Args:
br: A string - bridge name
if_exists: Bool, if False - attempting to delete a bridge that does not exist returns False.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_delete br0
'''
param_if_exists = _param_if_exists(if_exists)
cmd = 'ovs-vsctl {1}del-br {0}'.format(br, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_to_parent(br):
'''
Returns the parent bridge of a bridge.
Args:
br: A string - bridge name
Returns:
Name of the parent bridge. This is the same as the bridge name if the
bridge is not a fake bridge. If the bridge does not exist, False is
returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-parent {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return result['stdout']
def bridge_to_vlan(br):
'''
Returns the VLAN ID of a bridge.
Args:
br: A string - bridge name
Returns:
VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake
bridge. If the bridge does not exist, False is returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-vlan {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return int(result['stdout'])
def port_add(br, port, may_exist=False, internal=False):
'''
Creates on bridge a new port named port.
Returns:
True on success, else False.
Args:
br: A string - bridge name
port: A string - port name
may_exist: Bool, if False - attempting to create a port that exists returns False.
internal: A boolean to create an internal interface if one does not exist.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_add br0 8080
'''
param_may_exist = _param_may_exist(may_exist)
cmd = 'ovs-vsctl {2}add-port {0} {1}'.format(br, port, param_may_exist)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def port_remove(br, port, if_exists=True):
'''
Deletes port.
Args:
br: A string - bridge name (If bridge is None, port is removed from whatever bridge contains it)
port: A string - port name.
if_exists: Bool, if False - attempting to delete a por that does not exist returns False. (Default True)
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_remove br0 8080
'''
param_if_exists = _param_if_exists(if_exists)
if port and not br:
cmd = 'ovs-vsctl {1}del-port {0}'.format(port, param_if_exists)
else:
cmd = 'ovs-vsctl {2}del-port {0} {1}'.format(br, port, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def port_list(br):
'''
Lists all of the ports within bridge.
Args:
br: A string - bridge name.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_list br0
'''
cmd = 'ovs-vsctl list-ports {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def port_get_tag(port):
'''
Lists tags of the port.
Args:
port: A string - port name.
Returns:
List of tags (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_get_tag tap0
'''
cmd = 'ovs-vsctl get port {0} tag'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def interface_get_options(port):
'''
Port's interface's optional parameters.
Args:
port: A string - port name.
Returns:
String containing optional parameters of port's interface, False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.interface_get_options tap0
'''
cmd = 'ovs-vsctl get interface {0} options'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def interface_get_type(port):
'''
Type of port's interface.
Args:
port: A string - port name.
Returns:
String - type of interface or empty string, False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.interface_get_type tap0
'''
cmd = 'ovs-vsctl get interface {0} type'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def port_create_vlan(br, port, id, internal=False):
'''
Isolate VM traffic using VLANs.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer in the valid range 0 to 4095 (inclusive), name of VLAN.
internal: A boolean to create an internal interface if one does not exist.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_vlan br0 tap0 100
'''
interfaces = __salt__['network.interfaces']()
if not 0 <= id <= 4095:
return False
elif not bridge_exists(br):
return False
elif not internal and port not in interfaces:
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set port {0} tag={1}'.format(port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} tag={2}'.format(br, port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def port_create_gre(br, port, id, remote):
'''
Generic Routing Encapsulation - creates GRE tunnel between endpoints.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer - unsigned 32-bit number, tunnel's key.
remote: A string - remote endpoint's IP address.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
'''
if not 0 <= id < 2**32:
return False
elif not __salt__['dig.check_ip'](remote):
return False
elif not bridge_exists(br):
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set interface {0} type=gre options:remote_ip={1} options:key={2}'.format(port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=gre options:remote_ip={2} ' \
'options:key={3}'.format(br, port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def port_create_vxlan(br, port, id, remote, dst_port=None):
'''
Virtual eXtensible Local Area Network - creates VXLAN tunnel between endpoints.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer - unsigned 64-bit number, tunnel's key.
remote: A string - remote endpoint's IP address.
dst_port: An integer - port to use when creating tunnelport in the switch.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_vxlan br0 vx1 5001 192.168.1.10 8472
'''
dst_port = ' options:dst_port=' + six.text_type(dst_port) if 0 < dst_port <= 65535 else ''
if not 0 <= id < 2**64:
return False
elif not __salt__['dig.check_ip'](remote):
return False
elif not bridge_exists(br):
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set interface {0} type=vxlan options:remote_ip={1} ' \
'options:key={2}{3}'.format(port, remote, id, dst_port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=vxlan options:remote_ip={2} ' \
'options:key={3}{4}'.format(br, port, remote, id, dst_port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def db_get(table, record, column, if_exists=False):
'''
Gets a column's value for a specific record.
Args:
table: A string - name of the database table.
record: A string - identifier of the record.
column: A string - name of the column.
if_exists: A boolean - if True, it is not an error if the record does
not exist.
Returns:
The column's value.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.db_get Port br0 vlan_mode
'''
cmd = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]
if if_exists:
cmd += ['--if-exists']
cmd += ['list', table, record]
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
raise CommandExecutionError(result['stderr'])
output = _stdout_parse_json(result['stdout'])
if output['data'] and output['data'][0]:
return output['data'][0][0]
else:
return None
def db_set(table, record, column, value, if_exists=False):
'''
Sets a column's value for a specific record.
Args:
table: A string - name of the database table.
record: A string - identifier of the record.
column: A string - name of the column.
value: A string - the value to be set
if_exists: A boolean - if True, it is not an error if the record does
not exist.
Returns:
None on success and an error message on failure.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
'''
cmd = ['ovs-vsctl']
if if_exists:
cmd += ['--if-exists']
cmd += ['set', table, record, '{0}={1}'.format(column, json.dumps(value))]
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return result['stderr']
else:
return None
|
saltstack/salt
|
salt/modules/openvswitch.py
|
bridge_exists
|
python
|
def bridge_exists(br):
'''
Tests whether bridge exists as a real or fake bridge.
Returns:
True if Bridge exists, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_exists br0
'''
cmd = 'ovs-vsctl br-exists {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
|
Tests whether bridge exists as a real or fake bridge.
Returns:
True if Bridge exists, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_exists br0
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L168-L185
|
[
"def _retcode_to_bool(retcode):\n '''\n Evaulates Open vSwitch command`s retcode value.\n\n Args:\n retcode: Value of retcode field from response, should be 0, 1 or 2.\n\n Returns:\n True on 0, else False\n '''\n if retcode == 0:\n return True\n else:\n return False\n"
] |
# -*- coding: utf-8 -*-
'''
Support for Open vSwitch - module with basic Open vSwitch commands.
Suitable for setting up Openstack Neutron.
:codeauthor: Jiri Kotlin <jiri.kotlin@ultimum.io>
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
from salt.ext import six
from salt.exceptions import ArgumentValueError, CommandExecutionError
from salt.utils import json
import salt.utils.path
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load the module if Open vSwitch is installed
'''
if salt.utils.path.which('ovs-vsctl'):
return 'openvswitch'
return False
def _param_may_exist(may_exist):
'''
Returns --may-exist parameter for Open vSwitch command.
Args:
may_exist: Boolean whether to use this parameter.
Returns:
String '--may-exist ' or empty string.
'''
if may_exist:
return '--may-exist '
else:
return ''
def _param_if_exists(if_exists):
'''
Returns --if-exist parameter for Open vSwitch command.
Args:
if_exists: Boolean whether to use this parameter.
Returns:
String '--if-exist ' or empty string.
'''
if if_exists:
return '--if-exists '
else:
return ''
def _retcode_to_bool(retcode):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
Returns:
True on 0, else False
'''
if retcode == 0:
return True
else:
return False
def _stdout_list_split(retcode, stdout='', splitstring='\n'):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
stdout: Value of stdout filed from response.
splitstring: String used to split the stdout default new line.
Returns:
List or False.
'''
if retcode == 0:
ret = stdout.split(splitstring)
return ret
else:
return False
def _convert_json(obj):
'''
Converts from the JSON output provided by ovs-vsctl into a usable Python
object tree. In particular, sets and maps are converted from lists to
actual sets or maps.
Args:
obj: Object that shall be recursively converted.
Returns:
Converted version of object.
'''
if isinstance(obj, dict):
return {_convert_json(key): _convert_json(val)
for (key, val) in six.iteritems(obj)}
elif isinstance(obj, list) and len(obj) == 2:
first = obj[0]
second = obj[1]
if first == 'set' and isinstance(second, list):
return [_convert_json(elem) for elem in second]
elif first == 'map' and isinstance(second, list):
for elem in second:
if not isinstance(elem, list) or len(elem) != 2:
return obj
return {elem[0]: _convert_json(elem[1]) for elem in second}
else:
return obj
elif isinstance(obj, list):
return [_convert_json(elem) for elem in obj]
else:
return obj
def _stdout_parse_json(stdout):
'''
Parses JSON output from ovs-vsctl and returns the corresponding object
tree.
Args:
stdout: Output that shall be parsed.
Returns:
Object represented by the output.
'''
obj = json.loads(stdout)
return _convert_json(obj)
def bridge_list():
'''
Lists all existing real and fake bridges.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_list
'''
cmd = 'ovs-vsctl list-br'
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def bridge_create(br, may_exist=True, parent=None, vlan=None):
'''
Creates a new bridge.
Args:
br: A string - bridge name
may_exist: Bool, if False - attempting to create a bridge that exists returns False.
parent: String, the name of the parent bridge (if the bridge shall be
created as a fake bridge). If specified, vlan must also be
specified.
vlan: Int, the VLAN ID of the bridge (if the bridge shall be created as
a fake bridge). If specified, parent must also be specified.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_create br0
'''
param_may_exist = _param_may_exist(may_exist)
if parent is not None and vlan is None:
raise ArgumentValueError(
'If parent is specified, vlan must also be specified.')
if vlan is not None and parent is None:
raise ArgumentValueError(
'If vlan is specified, parent must also be specified.')
param_parent = '' if parent is None else ' {0}'.format(parent)
param_vlan = '' if vlan is None else ' {0}'.format(vlan)
cmd = 'ovs-vsctl {1}add-br {0}{2}{3}'.format(br, param_may_exist, param_parent,
param_vlan)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def bridge_delete(br, if_exists=True):
'''
Deletes bridge and all of its ports.
Args:
br: A string - bridge name
if_exists: Bool, if False - attempting to delete a bridge that does not exist returns False.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_delete br0
'''
param_if_exists = _param_if_exists(if_exists)
cmd = 'ovs-vsctl {1}del-br {0}'.format(br, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_to_parent(br):
'''
Returns the parent bridge of a bridge.
Args:
br: A string - bridge name
Returns:
Name of the parent bridge. This is the same as the bridge name if the
bridge is not a fake bridge. If the bridge does not exist, False is
returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-parent {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return result['stdout']
def bridge_to_vlan(br):
    '''
    Return the VLAN ID of a bridge.

    Args:
        br: A string - bridge name

    Returns:
        VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake
        bridge. If the bridge does not exist, False is returned.

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.bridge_to_parent br0
    '''
    outcome = __salt__['cmd.run_all']('ovs-vsctl br-to-vlan {0}'.format(br))
    if outcome['retcode'] != 0:
        return False
    # ovs-vsctl prints the VLAN as a decimal string.
    return int(outcome['stdout'])
def port_add(br, port, may_exist=False, internal=False):
    '''
    Create a new port named ``port`` on the given bridge.

    Args:
        br: A string - bridge name
        port: A string - port name
        may_exist: Bool, if False - attempting to create a port that exists
            returns False.
        internal: A boolean to create an internal interface if one does not
            exist.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_add br0 8080
    '''
    command = 'ovs-vsctl {0}add-port {1} {2}'.format(
        _param_may_exist(may_exist), br, port)
    if internal:
        # Chain a second ovs-vsctl sub-command to mark the interface internal.
        command += ' -- set interface {0} type=internal'.format(port)
    outcome = __salt__['cmd.run_all'](command)
    return _retcode_to_bool(outcome['retcode'])
def port_remove(br, port, if_exists=True):
    '''
    Delete a port.

    Args:
        br: A string - bridge name (If bridge is None, port is removed from
            whatever bridge contains it)
        port: A string - port name.
        if_exists: Bool, if False - attempting to delete a port that does not
            exist returns False. (Default True)

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_remove br0 8080
    '''
    prefix = _param_if_exists(if_exists)
    if port and not br:
        # No bridge given: let ovs-vsctl locate the port's bridge itself.
        command = 'ovs-vsctl {0}del-port {1}'.format(prefix, port)
    else:
        command = 'ovs-vsctl {0}del-port {1} {2}'.format(prefix, br, port)
    outcome = __salt__['cmd.run_all'](command)
    return _retcode_to_bool(outcome['retcode'])
def port_list(br):
    '''
    List all of the ports within a bridge.

    Args:
        br: A string - bridge name.

    Returns:
        List of ports (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_list br0
    '''
    outcome = __salt__['cmd.run_all']('ovs-vsctl list-ports {0}'.format(br))
    return _stdout_list_split(outcome['retcode'], outcome['stdout'])
def port_get_tag(port):
    '''
    List the tags of a port.

    Args:
        port: A string - port name.

    Returns:
        List of tags (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_get_tag tap0
    '''
    outcome = __salt__['cmd.run_all']('ovs-vsctl get port {0} tag'.format(port))
    return _stdout_list_split(outcome['retcode'], outcome['stdout'])
def interface_get_options(port):
    '''
    Return the optional parameters of a port's interface.

    Args:
        port: A string - port name.

    Returns:
        String containing optional parameters of the port's interface,
        False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.interface_get_options tap0
    '''
    outcome = __salt__['cmd.run_all'](
        'ovs-vsctl get interface {0} options'.format(port))
    return _stdout_list_split(outcome['retcode'], outcome['stdout'])
def interface_get_type(port):
    '''
    Return the type of a port's interface.

    Args:
        port: A string - port name.

    Returns:
        String - type of interface or empty string, False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.interface_get_type tap0
    '''
    outcome = __salt__['cmd.run_all'](
        'ovs-vsctl get interface {0} type'.format(port))
    return _stdout_list_split(outcome['retcode'], outcome['stdout'])
def port_create_vlan(br, port, id, internal=False):
    '''
    Isolate VM traffic using VLANs.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer in the valid range 0 to 4095 (inclusive), name of VLAN.
        internal: A boolean to create an internal interface if one does not
            exist.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_create_vlan br0 tap0 100
    '''
    known_interfaces = __salt__['network.interfaces']()
    # Validate the VLAN range, the bridge, and (for non-internal ports) that
    # the interface already exists on the system.
    if not 0 <= id <= 4095:
        return False
    if not bridge_exists(br):
        return False
    if not internal and port not in known_interfaces:
        return False
    if port in port_list(br):
        # Port already attached: only update its tag.
        command = 'ovs-vsctl set port {0} tag={1}'.format(port, id)
    else:
        command = 'ovs-vsctl add-port {0} {1} tag={2}'.format(br, port, id)
    if internal:
        command += ' -- set interface {0} type=internal'.format(port)
    outcome = __salt__['cmd.run_all'](command)
    return _retcode_to_bool(outcome['retcode'])
def port_create_gre(br, port, id, remote):
    '''
    Generic Routing Encapsulation - create a GRE tunnel between endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 32-bit number, tunnel's key.
        remote: A string - remote endpoint's IP address.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
    '''
    # The tunnel key must fit in an unsigned 32-bit integer.
    if not 0 <= id < 2**32:
        return False
    if not __salt__['dig.check_ip'](remote):
        return False
    if not bridge_exists(br):
        return False
    if port in port_list(br):
        # Port already attached: reconfigure its interface in place.
        command = ('ovs-vsctl set interface {0} type=gre options:remote_ip={1}'
                   ' options:key={2}'.format(port, remote, id))
    else:
        command = ('ovs-vsctl add-port {0} {1} -- set interface {1} type=gre'
                   ' options:remote_ip={2} options:key={3}'.format(
                       br, port, remote, id))
    outcome = __salt__['cmd.run_all'](command)
    return _retcode_to_bool(outcome['retcode'])
def port_create_vxlan(br, port, id, remote, dst_port=None):
    '''
    Virtual eXtensible Local Area Network - creates VXLAN tunnel between endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 64-bit number, tunnel's key.
        remote: A string - remote endpoint's IP address.
        dst_port: An integer - port to use when creating tunnelport in the
            switch. If None (the default), the switch's default VXLAN UDP
            port is used.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_create_vxlan br0 vx1 5001 192.168.1.10 8472
    '''
    # Bug fix: the original compared ``0 < dst_port`` unconditionally, which
    # raises TypeError on Python 3 when dst_port is left at its default of
    # None. Only apply the range check when a value was actually supplied.
    if dst_port is not None and 0 < dst_port <= 65535:
        dst_port = ' options:dst_port=' + six.text_type(dst_port)
    else:
        dst_port = ''
    if not 0 <= id < 2**64:
        return False
    elif not __salt__['dig.check_ip'](remote):
        return False
    elif not bridge_exists(br):
        return False
    elif port in port_list(br):
        cmd = 'ovs-vsctl set interface {0} type=vxlan options:remote_ip={1} ' \
              'options:key={2}{3}'.format(port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
    else:
        cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=vxlan options:remote_ip={2} ' \
              'options:key={3}{4}'.format(br, port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
def db_get(table, record, column, if_exists=False):
    '''
    Get a column's value for a specific record.

    Args:
        table: A string - name of the database table.
        record: A string - identifier of the record.
        column: A string - name of the column.
        if_exists: A boolean - if True, it is not an error if the record does
            not exist.

    Returns:
        The column's value.

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.db_get Port br0 vlan_mode
    '''
    command = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]
    if if_exists:
        command.append('--if-exists')
    command.extend(['list', table, record])
    result = __salt__['cmd.run_all'](command)
    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])
    parsed = _stdout_parse_json(result['stdout'])
    rows = parsed['data']
    # ovs-vsctl returns rows of column values; with a missing record (and
    # --if-exists) the data list is empty.
    if rows and rows[0]:
        return rows[0][0]
    return None
def db_set(table, record, column, value, if_exists=False):
    '''
    Set a column's value for a specific record.

    Args:
        table: A string - name of the database table.
        record: A string - identifier of the record.
        column: A string - name of the column.
        value: A string - the value to be set
        if_exists: A boolean - if True, it is not an error if the record does
            not exist.

    Returns:
        None on success and an error message on failure.

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
    '''
    command = ['ovs-vsctl']
    if if_exists:
        command.append('--if-exists')
    # JSON-encode the value so ovs-vsctl receives properly quoted data.
    command.extend(
        ['set', table, record, '{0}={1}'.format(column, json.dumps(value))])
    result = __salt__['cmd.run_all'](command)
    if result['retcode'] != 0:
        return result['stderr']
    return None
|
saltstack/salt
|
salt/modules/openvswitch.py
|
bridge_create
|
python
|
def bridge_create(br, may_exist=True, parent=None, vlan=None):
'''
Creates a new bridge.
Args:
br: A string - bridge name
may_exist: Bool, if False - attempting to create a bridge that exists returns False.
parent: String, the name of the parent bridge (if the bridge shall be
created as a fake bridge). If specified, vlan must also be
specified.
vlan: Int, the VLAN ID of the bridge (if the bridge shall be created as
a fake bridge). If specified, parent must also be specified.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_create br0
'''
param_may_exist = _param_may_exist(may_exist)
if parent is not None and vlan is None:
raise ArgumentValueError(
'If parent is specified, vlan must also be specified.')
if vlan is not None and parent is None:
raise ArgumentValueError(
'If vlan is specified, parent must also be specified.')
param_parent = '' if parent is None else ' {0}'.format(parent)
param_vlan = '' if vlan is None else ' {0}'.format(vlan)
cmd = 'ovs-vsctl {1}add-br {0}{2}{3}'.format(br, param_may_exist, param_parent,
param_vlan)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
|
Creates a new bridge.
Args:
br: A string - bridge name
may_exist: Bool, if False - attempting to create a bridge that exists returns False.
parent: String, the name of the parent bridge (if the bridge shall be
created as a fake bridge). If specified, vlan must also be
specified.
vlan: Int, the VLAN ID of the bridge (if the bridge shall be created as
a fake bridge). If specified, parent must also be specified.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_create br0
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L188-L223
|
[
"def _param_may_exist(may_exist):\n '''\n Returns --may-exist parameter for Open vSwitch command.\n\n Args:\n may_exist: Boolean whether to use this parameter.\n\n Returns:\n String '--may-exist ' or empty string.\n '''\n if may_exist:\n return '--may-exist '\n else:\n return ''\n",
"def _retcode_to_bool(retcode):\n '''\n Evaulates Open vSwitch command`s retcode value.\n\n Args:\n retcode: Value of retcode field from response, should be 0, 1 or 2.\n\n Returns:\n True on 0, else False\n '''\n if retcode == 0:\n return True\n else:\n return False\n"
] |
# -*- coding: utf-8 -*-
'''
Support for Open vSwitch - module with basic Open vSwitch commands.
Suitable for setting up Openstack Neutron.
:codeauthor: Jiri Kotlin <jiri.kotlin@ultimum.io>
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
from salt.ext import six
from salt.exceptions import ArgumentValueError, CommandExecutionError
from salt.utils import json
import salt.utils.path
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load the module if Open vSwitch is installed
    '''
    # The module is usable only when the ovs-vsctl binary is on PATH.
    return 'openvswitch' if salt.utils.path.which('ovs-vsctl') else False
def _param_may_exist(may_exist):
'''
Returns --may-exist parameter for Open vSwitch command.
Args:
may_exist: Boolean whether to use this parameter.
Returns:
String '--may-exist ' or empty string.
'''
if may_exist:
return '--may-exist '
else:
return ''
def _param_if_exists(if_exists):
'''
Returns --if-exist parameter for Open vSwitch command.
Args:
if_exists: Boolean whether to use this parameter.
Returns:
String '--if-exist ' or empty string.
'''
if if_exists:
return '--if-exists '
else:
return ''
def _retcode_to_bool(retcode):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
Returns:
True on 0, else False
'''
if retcode == 0:
return True
else:
return False
def _stdout_list_split(retcode, stdout='', splitstring='\n'):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
stdout: Value of stdout filed from response.
splitstring: String used to split the stdout default new line.
Returns:
List or False.
'''
if retcode == 0:
ret = stdout.split(splitstring)
return ret
else:
return False
def _convert_json(obj):
'''
Converts from the JSON output provided by ovs-vsctl into a usable Python
object tree. In particular, sets and maps are converted from lists to
actual sets or maps.
Args:
obj: Object that shall be recursively converted.
Returns:
Converted version of object.
'''
if isinstance(obj, dict):
return {_convert_json(key): _convert_json(val)
for (key, val) in six.iteritems(obj)}
elif isinstance(obj, list) and len(obj) == 2:
first = obj[0]
second = obj[1]
if first == 'set' and isinstance(second, list):
return [_convert_json(elem) for elem in second]
elif first == 'map' and isinstance(second, list):
for elem in second:
if not isinstance(elem, list) or len(elem) != 2:
return obj
return {elem[0]: _convert_json(elem[1]) for elem in second}
else:
return obj
elif isinstance(obj, list):
return [_convert_json(elem) for elem in obj]
else:
return obj
def _stdout_parse_json(stdout):
    '''
    Parse JSON output from ovs-vsctl and return the corresponding object
    tree, with OVSDB set/map wrappers converted to native containers.

    Args:
        stdout: Output that shall be parsed.

    Returns:
        Object represented by the output.
    '''
    return _convert_json(json.loads(stdout))
def bridge_list():
    '''
    List all existing real and fake bridges.

    Returns:
        List of bridges (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.bridge_list
    '''
    outcome = __salt__['cmd.run_all']('ovs-vsctl list-br')
    return _stdout_list_split(outcome['retcode'], outcome['stdout'])
def bridge_exists(br):
    '''
    Test whether a bridge exists as a real or fake bridge.

    Returns:
        True if Bridge exists, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.bridge_exists br0
    '''
    outcome = __salt__['cmd.run_all']('ovs-vsctl br-exists {0}'.format(br))
    return _retcode_to_bool(outcome['retcode'])
def bridge_delete(br, if_exists=True):
'''
Deletes bridge and all of its ports.
Args:
br: A string - bridge name
if_exists: Bool, if False - attempting to delete a bridge that does not exist returns False.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_delete br0
'''
param_if_exists = _param_if_exists(if_exists)
cmd = 'ovs-vsctl {1}del-br {0}'.format(br, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_to_parent(br):
'''
Returns the parent bridge of a bridge.
Args:
br: A string - bridge name
Returns:
Name of the parent bridge. This is the same as the bridge name if the
bridge is not a fake bridge. If the bridge does not exist, False is
returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-parent {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return result['stdout']
def bridge_to_vlan(br):
'''
Returns the VLAN ID of a bridge.
Args:
br: A string - bridge name
Returns:
VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake
bridge. If the bridge does not exist, False is returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-vlan {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return int(result['stdout'])
def port_add(br, port, may_exist=False, internal=False):
'''
Creates on bridge a new port named port.
Returns:
True on success, else False.
Args:
br: A string - bridge name
port: A string - port name
may_exist: Bool, if False - attempting to create a port that exists returns False.
internal: A boolean to create an internal interface if one does not exist.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_add br0 8080
'''
param_may_exist = _param_may_exist(may_exist)
cmd = 'ovs-vsctl {2}add-port {0} {1}'.format(br, port, param_may_exist)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def port_remove(br, port, if_exists=True):
'''
Deletes port.
Args:
br: A string - bridge name (If bridge is None, port is removed from whatever bridge contains it)
port: A string - port name.
if_exists: Bool, if False - attempting to delete a por that does not exist returns False. (Default True)
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_remove br0 8080
'''
param_if_exists = _param_if_exists(if_exists)
if port and not br:
cmd = 'ovs-vsctl {1}del-port {0}'.format(port, param_if_exists)
else:
cmd = 'ovs-vsctl {2}del-port {0} {1}'.format(br, port, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def port_list(br):
'''
Lists all of the ports within bridge.
Args:
br: A string - bridge name.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_list br0
'''
cmd = 'ovs-vsctl list-ports {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def port_get_tag(port):
'''
Lists tags of the port.
Args:
port: A string - port name.
Returns:
List of tags (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_get_tag tap0
'''
cmd = 'ovs-vsctl get port {0} tag'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def interface_get_options(port):
'''
Port's interface's optional parameters.
Args:
port: A string - port name.
Returns:
String containing optional parameters of port's interface, False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.interface_get_options tap0
'''
cmd = 'ovs-vsctl get interface {0} options'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def interface_get_type(port):
'''
Type of port's interface.
Args:
port: A string - port name.
Returns:
String - type of interface or empty string, False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.interface_get_type tap0
'''
cmd = 'ovs-vsctl get interface {0} type'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def port_create_vlan(br, port, id, internal=False):
'''
Isolate VM traffic using VLANs.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer in the valid range 0 to 4095 (inclusive), name of VLAN.
internal: A boolean to create an internal interface if one does not exist.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_vlan br0 tap0 100
'''
interfaces = __salt__['network.interfaces']()
if not 0 <= id <= 4095:
return False
elif not bridge_exists(br):
return False
elif not internal and port not in interfaces:
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set port {0} tag={1}'.format(port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} tag={2}'.format(br, port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def port_create_gre(br, port, id, remote):
'''
Generic Routing Encapsulation - creates GRE tunnel between endpoints.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer - unsigned 32-bit number, tunnel's key.
remote: A string - remote endpoint's IP address.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
'''
if not 0 <= id < 2**32:
return False
elif not __salt__['dig.check_ip'](remote):
return False
elif not bridge_exists(br):
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set interface {0} type=gre options:remote_ip={1} options:key={2}'.format(port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=gre options:remote_ip={2} ' \
'options:key={3}'.format(br, port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def port_create_vxlan(br, port, id, remote, dst_port=None):
'''
Virtual eXtensible Local Area Network - creates VXLAN tunnel between endpoints.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer - unsigned 64-bit number, tunnel's key.
remote: A string - remote endpoint's IP address.
dst_port: An integer - port to use when creating tunnelport in the switch.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_vxlan br0 vx1 5001 192.168.1.10 8472
'''
dst_port = ' options:dst_port=' + six.text_type(dst_port) if 0 < dst_port <= 65535 else ''
if not 0 <= id < 2**64:
return False
elif not __salt__['dig.check_ip'](remote):
return False
elif not bridge_exists(br):
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set interface {0} type=vxlan options:remote_ip={1} ' \
'options:key={2}{3}'.format(port, remote, id, dst_port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=vxlan options:remote_ip={2} ' \
'options:key={3}{4}'.format(br, port, remote, id, dst_port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def db_get(table, record, column, if_exists=False):
'''
Gets a column's value for a specific record.
Args:
table: A string - name of the database table.
record: A string - identifier of the record.
column: A string - name of the column.
if_exists: A boolean - if True, it is not an error if the record does
not exist.
Returns:
The column's value.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.db_get Port br0 vlan_mode
'''
cmd = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]
if if_exists:
cmd += ['--if-exists']
cmd += ['list', table, record]
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
raise CommandExecutionError(result['stderr'])
output = _stdout_parse_json(result['stdout'])
if output['data'] and output['data'][0]:
return output['data'][0][0]
else:
return None
def db_set(table, record, column, value, if_exists=False):
'''
Sets a column's value for a specific record.
Args:
table: A string - name of the database table.
record: A string - identifier of the record.
column: A string - name of the column.
value: A string - the value to be set
if_exists: A boolean - if True, it is not an error if the record does
not exist.
Returns:
None on success and an error message on failure.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
'''
cmd = ['ovs-vsctl']
if if_exists:
cmd += ['--if-exists']
cmd += ['set', table, record, '{0}={1}'.format(column, json.dumps(value))]
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return result['stderr']
else:
return None
|
saltstack/salt
|
salt/modules/openvswitch.py
|
bridge_delete
|
python
|
def bridge_delete(br, if_exists=True):
'''
Deletes bridge and all of its ports.
Args:
br: A string - bridge name
if_exists: Bool, if False - attempting to delete a bridge that does not exist returns False.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_delete br0
'''
param_if_exists = _param_if_exists(if_exists)
cmd = 'ovs-vsctl {1}del-br {0}'.format(br, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
|
Deletes bridge and all of its ports.
Args:
br: A string - bridge name
if_exists: Bool, if False - attempting to delete a bridge that does not exist returns False.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_delete br0
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L226-L248
|
[
"def _param_if_exists(if_exists):\n '''\n Returns --if-exist parameter for Open vSwitch command.\n\n Args:\n if_exists: Boolean whether to use this parameter.\n\n Returns:\n String '--if-exist ' or empty string.\n '''\n if if_exists:\n return '--if-exists '\n else:\n return ''\n",
"def _retcode_to_bool(retcode):\n '''\n Evaulates Open vSwitch command`s retcode value.\n\n Args:\n retcode: Value of retcode field from response, should be 0, 1 or 2.\n\n Returns:\n True on 0, else False\n '''\n if retcode == 0:\n return True\n else:\n return False\n"
] |
# -*- coding: utf-8 -*-
'''
Support for Open vSwitch - module with basic Open vSwitch commands.
Suitable for setting up Openstack Neutron.
:codeauthor: Jiri Kotlin <jiri.kotlin@ultimum.io>
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
from salt.ext import six
from salt.exceptions import ArgumentValueError, CommandExecutionError
from salt.utils import json
import salt.utils.path
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load the module if Open vSwitch is installed
'''
if salt.utils.path.which('ovs-vsctl'):
return 'openvswitch'
return False
def _param_may_exist(may_exist):
'''
Returns --may-exist parameter for Open vSwitch command.
Args:
may_exist: Boolean whether to use this parameter.
Returns:
String '--may-exist ' or empty string.
'''
if may_exist:
return '--may-exist '
else:
return ''
def _param_if_exists(if_exists):
'''
Returns --if-exist parameter for Open vSwitch command.
Args:
if_exists: Boolean whether to use this parameter.
Returns:
String '--if-exist ' or empty string.
'''
if if_exists:
return '--if-exists '
else:
return ''
def _retcode_to_bool(retcode):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
Returns:
True on 0, else False
'''
if retcode == 0:
return True
else:
return False
def _stdout_list_split(retcode, stdout='', splitstring='\n'):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
stdout: Value of stdout filed from response.
splitstring: String used to split the stdout default new line.
Returns:
List or False.
'''
if retcode == 0:
ret = stdout.split(splitstring)
return ret
else:
return False
def _convert_json(obj):
'''
Converts from the JSON output provided by ovs-vsctl into a usable Python
object tree. In particular, sets and maps are converted from lists to
actual sets or maps.
Args:
obj: Object that shall be recursively converted.
Returns:
Converted version of object.
'''
if isinstance(obj, dict):
return {_convert_json(key): _convert_json(val)
for (key, val) in six.iteritems(obj)}
elif isinstance(obj, list) and len(obj) == 2:
first = obj[0]
second = obj[1]
if first == 'set' and isinstance(second, list):
return [_convert_json(elem) for elem in second]
elif first == 'map' and isinstance(second, list):
for elem in second:
if not isinstance(elem, list) or len(elem) != 2:
return obj
return {elem[0]: _convert_json(elem[1]) for elem in second}
else:
return obj
elif isinstance(obj, list):
return [_convert_json(elem) for elem in obj]
else:
return obj
def _stdout_parse_json(stdout):
'''
Parses JSON output from ovs-vsctl and returns the corresponding object
tree.
Args:
stdout: Output that shall be parsed.
Returns:
Object represented by the output.
'''
obj = json.loads(stdout)
return _convert_json(obj)
def bridge_list():
'''
Lists all existing real and fake bridges.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_list
'''
cmd = 'ovs-vsctl list-br'
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def bridge_exists(br):
'''
Tests whether bridge exists as a real or fake bridge.
Returns:
True if Bridge exists, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_exists br0
'''
cmd = 'ovs-vsctl br-exists {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_create(br, may_exist=True, parent=None, vlan=None):
    '''
    Create a new bridge.

    Args:
        br: A string - bridge name
        may_exist: Bool, if False - attempting to create a bridge that exists
            returns False.
        parent: String, the name of the parent bridge (if the bridge shall be
            created as a fake bridge). If specified, vlan must also be
            specified.
        vlan: Int, the VLAN ID of the bridge (if the bridge shall be created
            as a fake bridge). If specified, parent must also be specified.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.bridge_create br0
    '''
    # A fake bridge requires both parent and vlan; reject half-specified input.
    if parent is not None and vlan is None:
        raise ArgumentValueError(
            'If parent is specified, vlan must also be specified.')
    if vlan is not None and parent is None:
        raise ArgumentValueError(
            'If vlan is specified, parent must also be specified.')
    command = 'ovs-vsctl {0}add-br {1}'.format(_param_may_exist(may_exist), br)
    if parent is not None:
        command += ' {0}'.format(parent)
    if vlan is not None:
        command += ' {0}'.format(vlan)
    outcome = __salt__['cmd.run_all'](command)
    return _retcode_to_bool(outcome['retcode'])
def bridge_to_parent(br):
'''
Returns the parent bridge of a bridge.
Args:
br: A string - bridge name
Returns:
Name of the parent bridge. This is the same as the bridge name if the
bridge is not a fake bridge. If the bridge does not exist, False is
returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-parent {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return result['stdout']
def bridge_to_vlan(br):
'''
Returns the VLAN ID of a bridge.
Args:
br: A string - bridge name
Returns:
VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake
bridge. If the bridge does not exist, False is returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-vlan {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return int(result['stdout'])
def port_add(br, port, may_exist=False, internal=False):
'''
Creates on bridge a new port named port.
Returns:
True on success, else False.
Args:
br: A string - bridge name
port: A string - port name
may_exist: Bool, if False - attempting to create a port that exists returns False.
internal: A boolean to create an internal interface if one does not exist.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_add br0 8080
'''
param_may_exist = _param_may_exist(may_exist)
cmd = 'ovs-vsctl {2}add-port {0} {1}'.format(br, port, param_may_exist)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def port_remove(br, port, if_exists=True):
'''
Deletes port.
Args:
br: A string - bridge name (If bridge is None, port is removed from whatever bridge contains it)
port: A string - port name.
if_exists: Bool, if False - attempting to delete a por that does not exist returns False. (Default True)
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_remove br0 8080
'''
param_if_exists = _param_if_exists(if_exists)
if port and not br:
cmd = 'ovs-vsctl {1}del-port {0}'.format(port, param_if_exists)
else:
cmd = 'ovs-vsctl {2}del-port {0} {1}'.format(br, port, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def port_list(br):
'''
Lists all of the ports within bridge.
Args:
br: A string - bridge name.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_list br0
'''
cmd = 'ovs-vsctl list-ports {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def port_get_tag(port):
'''
Lists tags of the port.
Args:
port: A string - port name.
Returns:
List of tags (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_get_tag tap0
'''
cmd = 'ovs-vsctl get port {0} tag'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def interface_get_options(port):
'''
Port's interface's optional parameters.
Args:
port: A string - port name.
Returns:
String containing optional parameters of port's interface, False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.interface_get_options tap0
'''
cmd = 'ovs-vsctl get interface {0} options'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def interface_get_type(port):
'''
Type of port's interface.
Args:
port: A string - port name.
Returns:
String - type of interface or empty string, False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.interface_get_type tap0
'''
cmd = 'ovs-vsctl get interface {0} type'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def port_create_vlan(br, port, id, internal=False):
'''
Isolate VM traffic using VLANs.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer in the valid range 0 to 4095 (inclusive), name of VLAN.
internal: A boolean to create an internal interface if one does not exist.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_vlan br0 tap0 100
'''
interfaces = __salt__['network.interfaces']()
if not 0 <= id <= 4095:
return False
elif not bridge_exists(br):
return False
elif not internal and port not in interfaces:
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set port {0} tag={1}'.format(port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} tag={2}'.format(br, port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def port_create_gre(br, port, id, remote):
'''
Generic Routing Encapsulation - creates GRE tunnel between endpoints.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer - unsigned 32-bit number, tunnel's key.
remote: A string - remote endpoint's IP address.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
'''
if not 0 <= id < 2**32:
return False
elif not __salt__['dig.check_ip'](remote):
return False
elif not bridge_exists(br):
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set interface {0} type=gre options:remote_ip={1} options:key={2}'.format(port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=gre options:remote_ip={2} ' \
'options:key={3}'.format(br, port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def port_create_vxlan(br, port, id, remote, dst_port=None):
'''
Virtual eXtensible Local Area Network - creates VXLAN tunnel between endpoints.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer - unsigned 64-bit number, tunnel's key.
remote: A string - remote endpoint's IP address.
dst_port: An integer - port to use when creating tunnelport in the switch.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_vxlan br0 vx1 5001 192.168.1.10 8472
'''
dst_port = ' options:dst_port=' + six.text_type(dst_port) if 0 < dst_port <= 65535 else ''
if not 0 <= id < 2**64:
return False
elif not __salt__['dig.check_ip'](remote):
return False
elif not bridge_exists(br):
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set interface {0} type=vxlan options:remote_ip={1} ' \
'options:key={2}{3}'.format(port, remote, id, dst_port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=vxlan options:remote_ip={2} ' \
'options:key={3}{4}'.format(br, port, remote, id, dst_port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def db_get(table, record, column, if_exists=False):
'''
Gets a column's value for a specific record.
Args:
table: A string - name of the database table.
record: A string - identifier of the record.
column: A string - name of the column.
if_exists: A boolean - if True, it is not an error if the record does
not exist.
Returns:
The column's value.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.db_get Port br0 vlan_mode
'''
cmd = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]
if if_exists:
cmd += ['--if-exists']
cmd += ['list', table, record]
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
raise CommandExecutionError(result['stderr'])
output = _stdout_parse_json(result['stdout'])
if output['data'] and output['data'][0]:
return output['data'][0][0]
else:
return None
def db_set(table, record, column, value, if_exists=False):
'''
Sets a column's value for a specific record.
Args:
table: A string - name of the database table.
record: A string - identifier of the record.
column: A string - name of the column.
value: A string - the value to be set
if_exists: A boolean - if True, it is not an error if the record does
not exist.
Returns:
None on success and an error message on failure.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
'''
cmd = ['ovs-vsctl']
if if_exists:
cmd += ['--if-exists']
cmd += ['set', table, record, '{0}={1}'.format(column, json.dumps(value))]
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return result['stderr']
else:
return None
|
saltstack/salt
|
salt/modules/openvswitch.py
|
bridge_to_parent
|
python
|
def bridge_to_parent(br):
'''
Returns the parent bridge of a bridge.
Args:
br: A string - bridge name
Returns:
Name of the parent bridge. This is the same as the bridge name if the
bridge is not a fake bridge. If the bridge does not exist, False is
returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-parent {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return result['stdout']
|
Returns the parent bridge of a bridge.
Args:
br: A string - bridge name
Returns:
Name of the parent bridge. This is the same as the bridge name if the
bridge is not a fake bridge. If the bridge does not exist, False is
returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L251-L274
| null |
# -*- coding: utf-8 -*-
'''
Support for Open vSwitch - module with basic Open vSwitch commands.
Suitable for setting up Openstack Neutron.
:codeauthor: Jiri Kotlin <jiri.kotlin@ultimum.io>
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
from salt.ext import six
from salt.exceptions import ArgumentValueError, CommandExecutionError
from salt.utils import json
import salt.utils.path
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Load this module only when the ``ovs-vsctl`` binary is available.
    '''
    # Open vSwitch support makes sense only where the CLI tool exists.
    return 'openvswitch' if salt.utils.path.which('ovs-vsctl') else False
def _param_may_exist(may_exist):
'''
Returns --may-exist parameter for Open vSwitch command.
Args:
may_exist: Boolean whether to use this parameter.
Returns:
String '--may-exist ' or empty string.
'''
if may_exist:
return '--may-exist '
else:
return ''
def _param_if_exists(if_exists):
'''
Returns --if-exist parameter for Open vSwitch command.
Args:
if_exists: Boolean whether to use this parameter.
Returns:
String '--if-exist ' or empty string.
'''
if if_exists:
return '--if-exists '
else:
return ''
def _retcode_to_bool(retcode):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
Returns:
True on 0, else False
'''
if retcode == 0:
return True
else:
return False
def _stdout_list_split(retcode, stdout='', splitstring='\n'):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
stdout: Value of stdout filed from response.
splitstring: String used to split the stdout default new line.
Returns:
List or False.
'''
if retcode == 0:
ret = stdout.split(splitstring)
return ret
else:
return False
def _convert_json(obj):
'''
Converts from the JSON output provided by ovs-vsctl into a usable Python
object tree. In particular, sets and maps are converted from lists to
actual sets or maps.
Args:
obj: Object that shall be recursively converted.
Returns:
Converted version of object.
'''
if isinstance(obj, dict):
return {_convert_json(key): _convert_json(val)
for (key, val) in six.iteritems(obj)}
elif isinstance(obj, list) and len(obj) == 2:
first = obj[0]
second = obj[1]
if first == 'set' and isinstance(second, list):
return [_convert_json(elem) for elem in second]
elif first == 'map' and isinstance(second, list):
for elem in second:
if not isinstance(elem, list) or len(elem) != 2:
return obj
return {elem[0]: _convert_json(elem[1]) for elem in second}
else:
return obj
elif isinstance(obj, list):
return [_convert_json(elem) for elem in obj]
else:
return obj
def _stdout_parse_json(stdout):
    '''
    Parse ``ovs-vsctl --format=json`` output into a Python object tree.

    Args:
        stdout: JSON text produced by ovs-vsctl.

    Returns:
        The decoded object with OVSDB ``set``/``map`` wrappers unwrapped.
    '''
    return _convert_json(json.loads(stdout))
def bridge_list():
    '''
    Lists all existing real and fake bridges.

    Returns:
        List of bridge names (possibly empty), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_list
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl list-br')
    return _stdout_list_split(result['retcode'], result['stdout'])
def bridge_exists(br):
    '''
    Tests whether a bridge exists as a real or fake bridge.

    Args:
        br: A string - bridge name.

    Returns:
        True if the bridge exists, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_exists br0
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl br-exists {0}'.format(br))
    return _retcode_to_bool(result['retcode'])
def bridge_create(br, may_exist=True, parent=None, vlan=None):
    '''
    Creates a new bridge.

    Args:
        br: A string - bridge name.
        may_exist: Bool, if False - attempting to create a bridge that
            already exists returns False.
        parent: String, name of the parent bridge when creating a fake
            bridge; requires ``vlan`` as well.
        vlan: Int, VLAN ID when creating a fake bridge; requires
            ``parent`` as well.

    Returns:
        True on success, else False.

    Raises:
        ArgumentValueError: when only one of ``parent``/``vlan`` is given.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_create br0
    '''
    # Fake bridges need both the parent bridge and a VLAN ID.
    if parent is not None and vlan is None:
        raise ArgumentValueError(
            'If parent is specified, vlan must also be specified.')
    if vlan is not None and parent is None:
        raise ArgumentValueError(
            'If vlan is specified, parent must also be specified.')
    param_may_exist = _param_may_exist(may_exist)
    param_parent = ' {0}'.format(parent) if parent is not None else ''
    param_vlan = ' {0}'.format(vlan) if vlan is not None else ''
    cmd = 'ovs-vsctl {1}add-br {0}{2}{3}'.format(
        br, param_may_exist, param_parent, param_vlan)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def bridge_delete(br, if_exists=True):
    '''
    Deletes a bridge together with all of its ports.

    Args:
        br: A string - bridge name.
        if_exists: Bool, if False - attempting to delete a bridge that does
            not exist returns False.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_delete br0
    '''
    cmd = 'ovs-vsctl {1}del-br {0}'.format(br, _param_if_exists(if_exists))
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def bridge_to_vlan(br):
    '''
    Returns the VLAN ID of a bridge.

    Args:
        br: A string - bridge name

    Returns:
        VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake
        bridge. If the bridge does not exist, False is returned.

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_to_vlan br0
    '''
    cmd = 'ovs-vsctl br-to-vlan {0}'.format(br)
    result = __salt__['cmd.run_all'](cmd)
    if result['retcode'] != 0:
        return False
    # ovs-vsctl prints the numeric VLAN ID on stdout.
    return int(result['stdout'])
def port_add(br, port, may_exist=False, internal=False):
    '''
    Creates a new port named ``port`` on the given bridge.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        may_exist: Bool, if False - attempting to create a port that
            already exists returns False.
        internal: A boolean to create an internal interface if one does
            not exist.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_add br0 8080
    '''
    cmd = 'ovs-vsctl {2}add-port {0} {1}'.format(
        br, port, _param_may_exist(may_exist))
    if internal:
        cmd += ' -- set interface {0} type=internal'.format(port)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def port_remove(br, port, if_exists=True):
    '''
    Deletes a port.

    Args:
        br: A string - bridge name (if None, the port is removed from
            whatever bridge contains it).
        port: A string - port name.
        if_exists: Bool, if False - attempting to delete a port that does
            not exist returns False. (Default True)

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_remove br0 8080
    '''
    flag = _param_if_exists(if_exists)
    if port and not br:
        # Without a bridge name ovs-vsctl locates the owning bridge itself.
        cmd = 'ovs-vsctl {1}del-port {0}'.format(port, flag)
    else:
        cmd = 'ovs-vsctl {2}del-port {0} {1}'.format(br, port, flag)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def port_list(br):
    '''
    Lists all of the ports within a bridge.

    Args:
        br: A string - bridge name.

    Returns:
        List of port names (possibly empty), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_list br0
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl list-ports {0}'.format(br))
    return _stdout_list_split(result['retcode'], result['stdout'])
def port_get_tag(port):
    '''
    Lists the tags of a port.

    Args:
        port: A string - port name.

    Returns:
        List of tags (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_get_tag tap0
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl get port {0} tag'.format(port))
    return _stdout_list_split(result['retcode'], result['stdout'])
def interface_get_options(port):
    '''
    Optional parameters of a port's interface.

    Args:
        port: A string - port name.

    Returns:
        String containing optional parameters of the port's interface,
        False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.interface_get_options tap0
    '''
    result = __salt__['cmd.run_all'](
        'ovs-vsctl get interface {0} options'.format(port))
    return _stdout_list_split(result['retcode'], result['stdout'])
def interface_get_type(port):
    '''
    Type of a port's interface.

    Args:
        port: A string - port name.

    Returns:
        String - type of the interface or empty string, False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.interface_get_type tap0
    '''
    result = __salt__['cmd.run_all'](
        'ovs-vsctl get interface {0} type'.format(port))
    return _stdout_list_split(result['retcode'], result['stdout'])
def port_create_vlan(br, port, id, internal=False):
    '''
    Isolate VM traffic using VLANs.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer in the valid range 0 to 4095 (inclusive), name of VLAN.
        internal: A boolean to create an internal interface if one does
            not exist.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_create_vlan br0 tap0 100
    '''
    interfaces = __salt__['network.interfaces']()
    # Validate VLAN range, bridge existence and (for non-internal ports)
    # that the interface is present on the host.
    if not 0 <= id <= 4095:
        return False
    if not bridge_exists(br):
        return False
    if not internal and port not in interfaces:
        return False
    if port in port_list(br):
        # Port already attached - just (re)tag it.
        cmd = 'ovs-vsctl set port {0} tag={1}'.format(port, id)
    else:
        cmd = 'ovs-vsctl add-port {0} {1} tag={2}'.format(br, port, id)
    if internal:
        cmd += ' -- set interface {0} type=internal'.format(port)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def port_create_gre(br, port, id, remote):
    '''
    Generic Routing Encapsulation - creates a GRE tunnel between endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 32-bit number, tunnel's key.
        remote: A string - remote endpoint's IP address.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
    '''
    # Reject out-of-range keys, invalid IPs and missing bridges up front.
    if not 0 <= id < 2**32:
        return False
    if not __salt__['dig.check_ip'](remote):
        return False
    if not bridge_exists(br):
        return False
    if port in port_list(br):
        # Existing port - only reconfigure its interface.
        cmd = ('ovs-vsctl set interface {0} type=gre options:remote_ip={1} '
               'options:key={2}'.format(port, remote, id))
    else:
        cmd = ('ovs-vsctl add-port {0} {1} -- set interface {1} type=gre '
               'options:remote_ip={2} options:key={3}'.format(
                   br, port, remote, id))
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def port_create_vxlan(br, port, id, remote, dst_port=None):
    '''
    Virtual eXtensible Local Area Network - creates VXLAN tunnel between endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 64-bit number, tunnel's key.
        remote: A string - remote endpoint's IP address.
        dst_port: An integer - optional UDP port to use for the tunnel
            (only applied when in the range 1-65535).

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_create_vxlan br0 vx1 5001 192.168.1.10 8472
    '''
    # Bug fix: the original compared ``0 < dst_port`` unconditionally, which
    # raises TypeError on Python 3 when dst_port is None (the default).
    # Check for None explicitly before the range test.
    if dst_port is not None and 0 < dst_port <= 65535:
        dst_port = ' options:dst_port=' + six.text_type(dst_port)
    else:
        dst_port = ''
    if not 0 <= id < 2**64:
        return False
    elif not __salt__['dig.check_ip'](remote):
        return False
    elif not bridge_exists(br):
        return False
    elif port in port_list(br):
        # Existing port - only reconfigure its interface.
        cmd = 'ovs-vsctl set interface {0} type=vxlan options:remote_ip={1} ' \
              'options:key={2}{3}'.format(port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
    else:
        cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=vxlan options:remote_ip={2} ' \
              'options:key={3}{4}'.format(br, port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
def db_get(table, record, column, if_exists=False):
    '''
    Gets a column's value for a specific record.

    Args:
        table: A string - name of the database table.
        record: A string - identifier of the record.
        column: A string - name of the column.
        if_exists: A boolean - if True, it is not an error if the record
            does not exist.

    Returns:
        The column's value, or None when no data is available.

    Raises:
        CommandExecutionError: when ovs-vsctl exits non-zero.

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.db_get Port br0 vlan_mode
    '''
    cmd = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]
    if if_exists:
        cmd.append('--if-exists')
    cmd.extend(['list', table, record])
    result = __salt__['cmd.run_all'](cmd)
    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])
    output = _stdout_parse_json(result['stdout'])
    if output['data'] and output['data'][0]:
        return output['data'][0][0]
    return None
def db_set(table, record, column, value, if_exists=False):
    '''
    Sets a column's value for a specific record.

    Args:
        table: A string - name of the database table.
        record: A string - identifier of the record.
        column: A string - name of the column.
        value: A string - the value to be set.
        if_exists: A boolean - if True, it is not an error if the record
            does not exist.

    Returns:
        None on success and an error message on failure.

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
    '''
    cmd = ['ovs-vsctl']
    if if_exists:
        cmd.append('--if-exists')
    # JSON-encode the value so strings and structured data are quoted in a
    # way the OVSDB parser accepts.
    cmd.extend(['set', table, record,
                '{0}={1}'.format(column, json.dumps(value))])
    result = __salt__['cmd.run_all'](cmd)
    if result['retcode'] != 0:
        return result['stderr']
    return None
|
saltstack/salt
|
salt/modules/openvswitch.py
|
bridge_to_vlan
|
python
|
def bridge_to_vlan(br):
'''
Returns the VLAN ID of a bridge.
Args:
br: A string - bridge name
Returns:
VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake
bridge. If the bridge does not exist, False is returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_vlan br0
'''
cmd = 'ovs-vsctl br-to-vlan {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return int(result['stdout'])
|
Returns the VLAN ID of a bridge.
Args:
br: A string - bridge name
Returns:
VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake
bridge. If the bridge does not exist, False is returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_vlan br0
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L277-L298
| null |
# -*- coding: utf-8 -*-
'''
Support for Open vSwitch - module with basic Open vSwitch commands.
Suitable for setting up Openstack Neutron.
:codeauthor: Jiri Kotlin <jiri.kotlin@ultimum.io>
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
from salt.ext import six
from salt.exceptions import ArgumentValueError, CommandExecutionError
from salt.utils import json
import salt.utils.path
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load the module if Open vSwitch is installed
'''
if salt.utils.path.which('ovs-vsctl'):
return 'openvswitch'
return False
def _param_may_exist(may_exist):
'''
Returns --may-exist parameter for Open vSwitch command.
Args:
may_exist: Boolean whether to use this parameter.
Returns:
String '--may-exist ' or empty string.
'''
if may_exist:
return '--may-exist '
else:
return ''
def _param_if_exists(if_exists):
'''
Returns --if-exist parameter for Open vSwitch command.
Args:
if_exists: Boolean whether to use this parameter.
Returns:
String '--if-exist ' or empty string.
'''
if if_exists:
return '--if-exists '
else:
return ''
def _retcode_to_bool(retcode):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
Returns:
True on 0, else False
'''
if retcode == 0:
return True
else:
return False
def _stdout_list_split(retcode, stdout='', splitstring='\n'):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
stdout: Value of stdout filed from response.
splitstring: String used to split the stdout default new line.
Returns:
List or False.
'''
if retcode == 0:
ret = stdout.split(splitstring)
return ret
else:
return False
def _convert_json(obj):
'''
Converts from the JSON output provided by ovs-vsctl into a usable Python
object tree. In particular, sets and maps are converted from lists to
actual sets or maps.
Args:
obj: Object that shall be recursively converted.
Returns:
Converted version of object.
'''
if isinstance(obj, dict):
return {_convert_json(key): _convert_json(val)
for (key, val) in six.iteritems(obj)}
elif isinstance(obj, list) and len(obj) == 2:
first = obj[0]
second = obj[1]
if first == 'set' and isinstance(second, list):
return [_convert_json(elem) for elem in second]
elif first == 'map' and isinstance(second, list):
for elem in second:
if not isinstance(elem, list) or len(elem) != 2:
return obj
return {elem[0]: _convert_json(elem[1]) for elem in second}
else:
return obj
elif isinstance(obj, list):
return [_convert_json(elem) for elem in obj]
else:
return obj
def _stdout_parse_json(stdout):
'''
Parses JSON output from ovs-vsctl and returns the corresponding object
tree.
Args:
stdout: Output that shall be parsed.
Returns:
Object represented by the output.
'''
obj = json.loads(stdout)
return _convert_json(obj)
def bridge_list():
'''
Lists all existing real and fake bridges.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_list
'''
cmd = 'ovs-vsctl list-br'
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def bridge_exists(br):
'''
Tests whether bridge exists as a real or fake bridge.
Returns:
True if Bridge exists, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_exists br0
'''
cmd = 'ovs-vsctl br-exists {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_create(br, may_exist=True, parent=None, vlan=None):
'''
Creates a new bridge.
Args:
br: A string - bridge name
may_exist: Bool, if False - attempting to create a bridge that exists returns False.
parent: String, the name of the parent bridge (if the bridge shall be
created as a fake bridge). If specified, vlan must also be
specified.
vlan: Int, the VLAN ID of the bridge (if the bridge shall be created as
a fake bridge). If specified, parent must also be specified.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_create br0
'''
param_may_exist = _param_may_exist(may_exist)
if parent is not None and vlan is None:
raise ArgumentValueError(
'If parent is specified, vlan must also be specified.')
if vlan is not None and parent is None:
raise ArgumentValueError(
'If vlan is specified, parent must also be specified.')
param_parent = '' if parent is None else ' {0}'.format(parent)
param_vlan = '' if vlan is None else ' {0}'.format(vlan)
cmd = 'ovs-vsctl {1}add-br {0}{2}{3}'.format(br, param_may_exist, param_parent,
param_vlan)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def bridge_delete(br, if_exists=True):
'''
Deletes bridge and all of its ports.
Args:
br: A string - bridge name
if_exists: Bool, if False - attempting to delete a bridge that does not exist returns False.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_delete br0
'''
param_if_exists = _param_if_exists(if_exists)
cmd = 'ovs-vsctl {1}del-br {0}'.format(br, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_to_parent(br):
'''
Returns the parent bridge of a bridge.
Args:
br: A string - bridge name
Returns:
Name of the parent bridge. This is the same as the bridge name if the
bridge is not a fake bridge. If the bridge does not exist, False is
returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-parent {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return result['stdout']
def port_add(br, port, may_exist=False, internal=False):
'''
Creates on bridge a new port named port.
Returns:
True on success, else False.
Args:
br: A string - bridge name
port: A string - port name
may_exist: Bool, if False - attempting to create a port that exists returns False.
internal: A boolean to create an internal interface if one does not exist.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_add br0 8080
'''
param_may_exist = _param_may_exist(may_exist)
cmd = 'ovs-vsctl {2}add-port {0} {1}'.format(br, port, param_may_exist)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def port_remove(br, port, if_exists=True):
'''
Deletes port.
Args:
br: A string - bridge name (If bridge is None, port is removed from whatever bridge contains it)
port: A string - port name.
if_exists: Bool, if False - attempting to delete a por that does not exist returns False. (Default True)
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_remove br0 8080
'''
param_if_exists = _param_if_exists(if_exists)
if port and not br:
cmd = 'ovs-vsctl {1}del-port {0}'.format(port, param_if_exists)
else:
cmd = 'ovs-vsctl {2}del-port {0} {1}'.format(br, port, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def port_list(br):
    '''
    Lists all of the ports within bridge.

    Args:
        br: A string - bridge name.

    Returns:
        List of ports (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_list br0
    '''
    # Fix: the original docstring claimed this returns "List of bridges";
    # `ovs-vsctl list-ports` enumerates the ports attached to the bridge.
    cmd = 'ovs-vsctl list-ports {0}'.format(br)
    result = __salt__['cmd.run_all'](cmd)
    return _stdout_list_split(result['retcode'], result['stdout'])
def port_get_tag(port):
    '''
    Lists tags of the port.

    Args:
        port: A string - port name.

    Returns:
        List of tags (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_get_tag tap0
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl get port {0} tag'.format(port))
    return _stdout_list_split(result['retcode'], result['stdout'])
def interface_get_options(port):
    '''
    Port's interface's optional parameters.

    Args:
        port: A string - port name.

    Returns:
        String containing optional parameters of port's interface, False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.interface_get_options tap0
    '''
    result = __salt__['cmd.run_all'](
        'ovs-vsctl get interface {0} options'.format(port))
    return _stdout_list_split(result['retcode'], result['stdout'])
def interface_get_type(port):
    '''
    Type of port's interface.

    Args:
        port: A string - port name.

    Returns:
        String - type of interface or empty string, False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.interface_get_type tap0
    '''
    result = __salt__['cmd.run_all'](
        'ovs-vsctl get interface {0} type'.format(port))
    return _stdout_list_split(result['retcode'], result['stdout'])
def port_create_vlan(br, port, id, internal=False):
    '''
    Isolate VM traffic using VLANs.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer in the valid range 0 to 4095 (inclusive), name of VLAN.
        internal: A boolean to create an internal interface if one does not exist.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_create_vlan br0 tap0 100
    '''
    interfaces = __salt__['network.interfaces']()
    # Reject invalid VLAN ids, missing bridges, and (for non-internal ports)
    # interfaces the host does not know about.
    if not 0 <= id <= 4095:
        return False
    if not bridge_exists(br):
        return False
    if not internal and port not in interfaces:
        return False
    if port in port_list(br):
        # Port already attached: just (re)tag it.
        cmd = 'ovs-vsctl set port {0} tag={1}'.format(port, id)
    else:
        cmd = 'ovs-vsctl add-port {0} {1} tag={2}'.format(br, port, id)
    if internal:
        cmd += ' -- set interface {0} type=internal'.format(port)
    return _retcode_to_bool(__salt__['cmd.run_all'](cmd)['retcode'])
def port_create_gre(br, port, id, remote):
    '''
    Generic Routing Encapsulation - creates GRE tunnel between endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 32-bit number, tunnel's key.
        remote: A string - remote endpoint's IP address.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
    '''
    # Validate key range, remote address and bridge before touching OVS.
    if not 0 <= id < 2**32:
        return False
    if not __salt__['dig.check_ip'](remote):
        return False
    if not bridge_exists(br):
        return False
    if port in port_list(br):
        # Port exists: only reconfigure its interface.
        cmd = 'ovs-vsctl set interface {0} type=gre options:remote_ip={1} options:key={2}'.format(port, remote, id)
    else:
        cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=gre options:remote_ip={2} ' \
              'options:key={3}'.format(br, port, remote, id)
    return _retcode_to_bool(__salt__['cmd.run_all'](cmd)['retcode'])
def port_create_vxlan(br, port, id, remote, dst_port=None):
    '''
    Virtual eXtensible Local Area Network - creates VXLAN tunnel between endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 64-bit number, tunnel's key.
        remote: A string - remote endpoint's IP address.
        dst_port: An integer - port to use when creating tunnelport in the switch.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_create_vxlan br0 vx1 5001 192.168.1.10 8472
    '''
    # Bug fix: the default dst_port is None, and ordering comparisons with
    # None (``0 < None``) raise TypeError on Python 3.  Check for None
    # explicitly before range-checking.
    if dst_port is not None and 0 < dst_port <= 65535:
        dst_port = ' options:dst_port=' + six.text_type(dst_port)
    else:
        dst_port = ''
    if not 0 <= id < 2**64:
        return False
    elif not __salt__['dig.check_ip'](remote):
        return False
    elif not bridge_exists(br):
        return False
    elif port in port_list(br):
        cmd = 'ovs-vsctl set interface {0} type=vxlan options:remote_ip={1} ' \
              'options:key={2}{3}'.format(port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
    else:
        cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=vxlan options:remote_ip={2} ' \
              'options:key={3}{4}'.format(br, port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
def db_get(table, record, column, if_exists=False):
    '''
    Gets a column's value for a specific record.

    Args:
        table: A string - name of the database table.
        record: A string - identifier of the record.
        column: A string - name of the column.
        if_exists: A boolean - if True, it is not an error if the record does
            not exist.

    Returns:
        The column's value.

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.db_get Port br0 vlan_mode
    '''
    cmd = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]
    if if_exists:
        cmd.append('--if-exists')
    cmd.extend(['list', table, record])
    result = __salt__['cmd.run_all'](cmd)
    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])
    data = _stdout_parse_json(result['stdout'])['data']
    if data and data[0]:
        return data[0][0]
    return None
def db_set(table, record, column, value, if_exists=False):
    '''
    Sets a column's value for a specific record.

    Args:
        table: A string - name of the database table.
        record: A string - identifier of the record.
        column: A string - name of the column.
        value: A string - the value to be set
        if_exists: A boolean - if True, it is not an error if the record does
            not exist.

    Returns:
        None on success and an error message on failure.

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
    '''
    cmd = ['ovs-vsctl']
    if if_exists:
        cmd.append('--if-exists')
    # JSON-encode the value so ovs-vsctl receives properly quoted data.
    cmd.extend(['set', table, record,
                '{0}={1}'.format(column, json.dumps(value))])
    result = __salt__['cmd.run_all'](cmd)
    if result['retcode'] != 0:
        return result['stderr']
    return None
|
saltstack/salt
|
salt/modules/openvswitch.py
|
port_add
|
python
|
def port_add(br, port, may_exist=False, internal=False):
'''
Creates on bridge a new port named port.
Returns:
True on success, else False.
Args:
br: A string - bridge name
port: A string - port name
may_exist: Bool, if False - attempting to create a port that exists returns False.
internal: A boolean to create an internal interface if one does not exist.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_add br0 8080
'''
param_may_exist = _param_may_exist(may_exist)
cmd = 'ovs-vsctl {2}add-port {0} {1}'.format(br, port, param_may_exist)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
|
Creates on bridge a new port named port.
Returns:
True on success, else False.
Args:
br: A string - bridge name
port: A string - port name
may_exist: Bool, if False - attempting to create a port that exists returns False.
internal: A boolean to create an internal interface if one does not exist.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_add br0 8080
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L301-L327
|
[
"def _param_may_exist(may_exist):\n '''\n Returns --may-exist parameter for Open vSwitch command.\n\n Args:\n may_exist: Boolean whether to use this parameter.\n\n Returns:\n String '--may-exist ' or empty string.\n '''\n if may_exist:\n return '--may-exist '\n else:\n return ''\n",
"def _retcode_to_bool(retcode):\n '''\n Evaulates Open vSwitch command`s retcode value.\n\n Args:\n retcode: Value of retcode field from response, should be 0, 1 or 2.\n\n Returns:\n True on 0, else False\n '''\n if retcode == 0:\n return True\n else:\n return False\n"
] |
# -*- coding: utf-8 -*-
'''
Support for Open vSwitch - module with basic Open vSwitch commands.
Suitable for setting up Openstack Neutron.
:codeauthor: Jiri Kotlin <jiri.kotlin@ultimum.io>
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
from salt.ext import six
from salt.exceptions import ArgumentValueError, CommandExecutionError
from salt.utils import json
import salt.utils.path
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load the module if Open vSwitch is installed
    '''
    # Expose the module under the 'openvswitch' name only when the
    # ovs-vsctl binary is present on the minion.
    return 'openvswitch' if salt.utils.path.which('ovs-vsctl') else False
def _param_may_exist(may_exist):
'''
Returns --may-exist parameter for Open vSwitch command.
Args:
may_exist: Boolean whether to use this parameter.
Returns:
String '--may-exist ' or empty string.
'''
if may_exist:
return '--may-exist '
else:
return ''
def _param_if_exists(if_exists):
'''
Returns --if-exist parameter for Open vSwitch command.
Args:
if_exists: Boolean whether to use this parameter.
Returns:
String '--if-exist ' or empty string.
'''
if if_exists:
return '--if-exists '
else:
return ''
def _retcode_to_bool(retcode):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
Returns:
True on 0, else False
'''
if retcode == 0:
return True
else:
return False
def _stdout_list_split(retcode, stdout='', splitstring='\n'):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
stdout: Value of stdout filed from response.
splitstring: String used to split the stdout default new line.
Returns:
List or False.
'''
if retcode == 0:
ret = stdout.split(splitstring)
return ret
else:
return False
def _convert_json(obj):
'''
Converts from the JSON output provided by ovs-vsctl into a usable Python
object tree. In particular, sets and maps are converted from lists to
actual sets or maps.
Args:
obj: Object that shall be recursively converted.
Returns:
Converted version of object.
'''
if isinstance(obj, dict):
return {_convert_json(key): _convert_json(val)
for (key, val) in six.iteritems(obj)}
elif isinstance(obj, list) and len(obj) == 2:
first = obj[0]
second = obj[1]
if first == 'set' and isinstance(second, list):
return [_convert_json(elem) for elem in second]
elif first == 'map' and isinstance(second, list):
for elem in second:
if not isinstance(elem, list) or len(elem) != 2:
return obj
return {elem[0]: _convert_json(elem[1]) for elem in second}
else:
return obj
elif isinstance(obj, list):
return [_convert_json(elem) for elem in obj]
else:
return obj
def _stdout_parse_json(stdout):
    '''
    Parses JSON output from ovs-vsctl and returns the corresponding object
    tree, with ovs 'set'/'map' wrappers unwrapped by ``_convert_json``.

    Args:
        stdout: Output that shall be parsed.

    Returns:
        Object represented by the output.
    '''
    return _convert_json(json.loads(stdout))
def bridge_list():
    '''
    Lists all existing real and fake bridges.

    Returns:
        List of bridges (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_list
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl list-br')
    return _stdout_list_split(result['retcode'], result['stdout'])
def bridge_exists(br):
    '''
    Tests whether bridge exists as a real or fake bridge.

    Returns:
        True if Bridge exists, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_exists br0
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl br-exists {0}'.format(br))
    return _retcode_to_bool(result['retcode'])
def bridge_create(br, may_exist=True, parent=None, vlan=None):
    '''
    Creates a new bridge.

    Args:
        br: A string - bridge name
        may_exist: Bool, if False - attempting to create a bridge that exists returns False.
        parent: String, the name of the parent bridge (if the bridge shall be
            created as a fake bridge). If specified, vlan must also be
            specified.
        vlan: Int, the VLAN ID of the bridge (if the bridge shall be created as
            a fake bridge). If specified, parent must also be specified.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_create br0
    '''
    # Fake bridges need both halves of the (parent, vlan) pair.
    if parent is not None and vlan is None:
        raise ArgumentValueError(
            'If parent is specified, vlan must also be specified.')
    if vlan is not None and parent is None:
        raise ArgumentValueError(
            'If vlan is specified, parent must also be specified.')
    cmd = 'ovs-vsctl {1}add-br {0}{2}{3}'.format(
        br,
        _param_may_exist(may_exist),
        '' if parent is None else ' {0}'.format(parent),
        '' if vlan is None else ' {0}'.format(vlan))
    return _retcode_to_bool(__salt__['cmd.run_all'](cmd)['retcode'])
def bridge_delete(br, if_exists=True):
    '''
    Deletes bridge and all of its ports.

    Args:
        br: A string - bridge name
        if_exists: Bool, if False - attempting to delete a bridge that does not exist returns False.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_delete br0
    '''
    cmd = 'ovs-vsctl {1}del-br {0}'.format(br, _param_if_exists(if_exists))
    return _retcode_to_bool(__salt__['cmd.run_all'](cmd)['retcode'])
def bridge_to_parent(br):
'''
Returns the parent bridge of a bridge.
Args:
br: A string - bridge name
Returns:
Name of the parent bridge. This is the same as the bridge name if the
bridge is not a fake bridge. If the bridge does not exist, False is
returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-parent {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return result['stdout']
def bridge_to_vlan(br):
    '''
    Returns the VLAN ID of a bridge.

    Args:
        br: A string - bridge name

    Returns:
        VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake
        bridge. If the bridge does not exist, False is returned.

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_to_vlan br0
    '''
    # Fix: the docstring's CLI example previously named bridge_to_parent.
    cmd = 'ovs-vsctl br-to-vlan {0}'.format(br)
    result = __salt__['cmd.run_all'](cmd)
    if result['retcode'] != 0:
        return False
    return int(result['stdout'])
def port_remove(br, port, if_exists=True):
'''
Deletes port.
Args:
br: A string - bridge name (If bridge is None, port is removed from whatever bridge contains it)
port: A string - port name.
if_exists: Bool, if False - attempting to delete a por that does not exist returns False. (Default True)
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_remove br0 8080
'''
param_if_exists = _param_if_exists(if_exists)
if port and not br:
cmd = 'ovs-vsctl {1}del-port {0}'.format(port, param_if_exists)
else:
cmd = 'ovs-vsctl {2}del-port {0} {1}'.format(br, port, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def port_list(br):
'''
Lists all of the ports within bridge.
Args:
br: A string - bridge name.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_list br0
'''
cmd = 'ovs-vsctl list-ports {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def port_get_tag(port):
'''
Lists tags of the port.
Args:
port: A string - port name.
Returns:
List of tags (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_get_tag tap0
'''
cmd = 'ovs-vsctl get port {0} tag'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def interface_get_options(port):
'''
Port's interface's optional parameters.
Args:
port: A string - port name.
Returns:
String containing optional parameters of port's interface, False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.interface_get_options tap0
'''
cmd = 'ovs-vsctl get interface {0} options'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def interface_get_type(port):
'''
Type of port's interface.
Args:
port: A string - port name.
Returns:
String - type of interface or empty string, False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.interface_get_type tap0
'''
cmd = 'ovs-vsctl get interface {0} type'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def port_create_vlan(br, port, id, internal=False):
'''
Isolate VM traffic using VLANs.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer in the valid range 0 to 4095 (inclusive), name of VLAN.
internal: A boolean to create an internal interface if one does not exist.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_vlan br0 tap0 100
'''
interfaces = __salt__['network.interfaces']()
if not 0 <= id <= 4095:
return False
elif not bridge_exists(br):
return False
elif not internal and port not in interfaces:
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set port {0} tag={1}'.format(port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} tag={2}'.format(br, port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def port_create_gre(br, port, id, remote):
'''
Generic Routing Encapsulation - creates GRE tunnel between endpoints.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer - unsigned 32-bit number, tunnel's key.
remote: A string - remote endpoint's IP address.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
'''
if not 0 <= id < 2**32:
return False
elif not __salt__['dig.check_ip'](remote):
return False
elif not bridge_exists(br):
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set interface {0} type=gre options:remote_ip={1} options:key={2}'.format(port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=gre options:remote_ip={2} ' \
'options:key={3}'.format(br, port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def port_create_vxlan(br, port, id, remote, dst_port=None):
'''
Virtual eXtensible Local Area Network - creates VXLAN tunnel between endpoints.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer - unsigned 64-bit number, tunnel's key.
remote: A string - remote endpoint's IP address.
dst_port: An integer - port to use when creating tunnelport in the switch.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_vxlan br0 vx1 5001 192.168.1.10 8472
'''
dst_port = ' options:dst_port=' + six.text_type(dst_port) if 0 < dst_port <= 65535 else ''
if not 0 <= id < 2**64:
return False
elif not __salt__['dig.check_ip'](remote):
return False
elif not bridge_exists(br):
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set interface {0} type=vxlan options:remote_ip={1} ' \
'options:key={2}{3}'.format(port, remote, id, dst_port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=vxlan options:remote_ip={2} ' \
'options:key={3}{4}'.format(br, port, remote, id, dst_port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def db_get(table, record, column, if_exists=False):
'''
Gets a column's value for a specific record.
Args:
table: A string - name of the database table.
record: A string - identifier of the record.
column: A string - name of the column.
if_exists: A boolean - if True, it is not an error if the record does
not exist.
Returns:
The column's value.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.db_get Port br0 vlan_mode
'''
cmd = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]
if if_exists:
cmd += ['--if-exists']
cmd += ['list', table, record]
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
raise CommandExecutionError(result['stderr'])
output = _stdout_parse_json(result['stdout'])
if output['data'] and output['data'][0]:
return output['data'][0][0]
else:
return None
def db_set(table, record, column, value, if_exists=False):
'''
Sets a column's value for a specific record.
Args:
table: A string - name of the database table.
record: A string - identifier of the record.
column: A string - name of the column.
value: A string - the value to be set
if_exists: A boolean - if True, it is not an error if the record does
not exist.
Returns:
None on success and an error message on failure.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
'''
cmd = ['ovs-vsctl']
if if_exists:
cmd += ['--if-exists']
cmd += ['set', table, record, '{0}={1}'.format(column, json.dumps(value))]
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return result['stderr']
else:
return None
|
saltstack/salt
|
salt/modules/openvswitch.py
|
port_remove
|
python
|
def port_remove(br, port, if_exists=True):
'''
Deletes port.
Args:
br: A string - bridge name (If bridge is None, port is removed from whatever bridge contains it)
port: A string - port name.
if_exists: Bool, if False - attempting to delete a por that does not exist returns False. (Default True)
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_remove br0 8080
'''
param_if_exists = _param_if_exists(if_exists)
if port and not br:
cmd = 'ovs-vsctl {1}del-port {0}'.format(port, param_if_exists)
else:
cmd = 'ovs-vsctl {2}del-port {0} {1}'.format(br, port, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
|
Deletes port.
Args:
br: A string - bridge name (If bridge is None, port is removed from whatever bridge contains it)
port: A string - port name.
if_exists: Bool, if False - attempting to delete a por that does not exist returns False. (Default True)
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_remove br0 8080
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L330-L357
|
[
"def _param_if_exists(if_exists):\n '''\n Returns --if-exist parameter for Open vSwitch command.\n\n Args:\n if_exists: Boolean whether to use this parameter.\n\n Returns:\n String '--if-exist ' or empty string.\n '''\n if if_exists:\n return '--if-exists '\n else:\n return ''\n",
"def _retcode_to_bool(retcode):\n '''\n Evaulates Open vSwitch command`s retcode value.\n\n Args:\n retcode: Value of retcode field from response, should be 0, 1 or 2.\n\n Returns:\n True on 0, else False\n '''\n if retcode == 0:\n return True\n else:\n return False\n"
] |
# -*- coding: utf-8 -*-
'''
Support for Open vSwitch - module with basic Open vSwitch commands.
Suitable for setting up Openstack Neutron.
:codeauthor: Jiri Kotlin <jiri.kotlin@ultimum.io>
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
from salt.ext import six
from salt.exceptions import ArgumentValueError, CommandExecutionError
from salt.utils import json
import salt.utils.path
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load the module if Open vSwitch is installed
'''
if salt.utils.path.which('ovs-vsctl'):
return 'openvswitch'
return False
def _param_may_exist(may_exist):
'''
Returns --may-exist parameter for Open vSwitch command.
Args:
may_exist: Boolean whether to use this parameter.
Returns:
String '--may-exist ' or empty string.
'''
if may_exist:
return '--may-exist '
else:
return ''
def _param_if_exists(if_exists):
'''
Returns --if-exist parameter for Open vSwitch command.
Args:
if_exists: Boolean whether to use this parameter.
Returns:
String '--if-exist ' or empty string.
'''
if if_exists:
return '--if-exists '
else:
return ''
def _retcode_to_bool(retcode):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
Returns:
True on 0, else False
'''
if retcode == 0:
return True
else:
return False
def _stdout_list_split(retcode, stdout='', splitstring='\n'):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
stdout: Value of stdout filed from response.
splitstring: String used to split the stdout default new line.
Returns:
List or False.
'''
if retcode == 0:
ret = stdout.split(splitstring)
return ret
else:
return False
def _convert_json(obj):
'''
Converts from the JSON output provided by ovs-vsctl into a usable Python
object tree. In particular, sets and maps are converted from lists to
actual sets or maps.
Args:
obj: Object that shall be recursively converted.
Returns:
Converted version of object.
'''
if isinstance(obj, dict):
return {_convert_json(key): _convert_json(val)
for (key, val) in six.iteritems(obj)}
elif isinstance(obj, list) and len(obj) == 2:
first = obj[0]
second = obj[1]
if first == 'set' and isinstance(second, list):
return [_convert_json(elem) for elem in second]
elif first == 'map' and isinstance(second, list):
for elem in second:
if not isinstance(elem, list) or len(elem) != 2:
return obj
return {elem[0]: _convert_json(elem[1]) for elem in second}
else:
return obj
elif isinstance(obj, list):
return [_convert_json(elem) for elem in obj]
else:
return obj
def _stdout_parse_json(stdout):
'''
Parses JSON output from ovs-vsctl and returns the corresponding object
tree.
Args:
stdout: Output that shall be parsed.
Returns:
Object represented by the output.
'''
obj = json.loads(stdout)
return _convert_json(obj)
def bridge_list():
'''
Lists all existing real and fake bridges.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_list
'''
cmd = 'ovs-vsctl list-br'
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def bridge_exists(br):
'''
Tests whether bridge exists as a real or fake bridge.
Returns:
True if Bridge exists, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_exists br0
'''
cmd = 'ovs-vsctl br-exists {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_create(br, may_exist=True, parent=None, vlan=None):
'''
Creates a new bridge.
Args:
br: A string - bridge name
may_exist: Bool, if False - attempting to create a bridge that exists returns False.
parent: String, the name of the parent bridge (if the bridge shall be
created as a fake bridge). If specified, vlan must also be
specified.
vlan: Int, the VLAN ID of the bridge (if the bridge shall be created as
a fake bridge). If specified, parent must also be specified.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_create br0
'''
param_may_exist = _param_may_exist(may_exist)
if parent is not None and vlan is None:
raise ArgumentValueError(
'If parent is specified, vlan must also be specified.')
if vlan is not None and parent is None:
raise ArgumentValueError(
'If vlan is specified, parent must also be specified.')
param_parent = '' if parent is None else ' {0}'.format(parent)
param_vlan = '' if vlan is None else ' {0}'.format(vlan)
cmd = 'ovs-vsctl {1}add-br {0}{2}{3}'.format(br, param_may_exist, param_parent,
param_vlan)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def bridge_delete(br, if_exists=True):
'''
Deletes bridge and all of its ports.
Args:
br: A string - bridge name
if_exists: Bool, if False - attempting to delete a bridge that does not exist returns False.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_delete br0
'''
param_if_exists = _param_if_exists(if_exists)
cmd = 'ovs-vsctl {1}del-br {0}'.format(br, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_to_parent(br):
'''
Returns the parent bridge of a bridge.
Args:
br: A string - bridge name
Returns:
Name of the parent bridge. This is the same as the bridge name if the
bridge is not a fake bridge. If the bridge does not exist, False is
returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-parent {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return result['stdout']
def bridge_to_vlan(br):
'''
Returns the VLAN ID of a bridge.
Args:
br: A string - bridge name
Returns:
VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake
bridge. If the bridge does not exist, False is returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-vlan {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return int(result['stdout'])
def port_add(br, port, may_exist=False, internal=False):
'''
Creates on bridge a new port named port.
Returns:
True on success, else False.
Args:
br: A string - bridge name
port: A string - port name
may_exist: Bool, if False - attempting to create a port that exists returns False.
internal: A boolean to create an internal interface if one does not exist.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_add br0 8080
'''
param_may_exist = _param_may_exist(may_exist)
cmd = 'ovs-vsctl {2}add-port {0} {1}'.format(br, port, param_may_exist)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def port_list(br):
'''
Lists all of the ports within bridge.
Args:
br: A string - bridge name.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_list br0
'''
cmd = 'ovs-vsctl list-ports {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def port_get_tag(port):
'''
Lists tags of the port.
Args:
port: A string - port name.
Returns:
List of tags (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_get_tag tap0
'''
cmd = 'ovs-vsctl get port {0} tag'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def interface_get_options(port):
'''
Port's interface's optional parameters.
Args:
port: A string - port name.
Returns:
String containing optional parameters of port's interface, False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.interface_get_options tap0
'''
cmd = 'ovs-vsctl get interface {0} options'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def interface_get_type(port):
'''
Type of port's interface.
Args:
port: A string - port name.
Returns:
String - type of interface or empty string, False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.interface_get_type tap0
'''
cmd = 'ovs-vsctl get interface {0} type'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def port_create_vlan(br, port, id, internal=False):
'''
Isolate VM traffic using VLANs.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer in the valid range 0 to 4095 (inclusive), name of VLAN.
internal: A boolean to create an internal interface if one does not exist.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_vlan br0 tap0 100
'''
interfaces = __salt__['network.interfaces']()
if not 0 <= id <= 4095:
return False
elif not bridge_exists(br):
return False
elif not internal and port not in interfaces:
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set port {0} tag={1}'.format(port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} tag={2}'.format(br, port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def port_create_gre(br, port, id, remote):
'''
Generic Routing Encapsulation - creates GRE tunnel between endpoints.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer - unsigned 32-bit number, tunnel's key.
remote: A string - remote endpoint's IP address.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
'''
if not 0 <= id < 2**32:
return False
elif not __salt__['dig.check_ip'](remote):
return False
elif not bridge_exists(br):
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set interface {0} type=gre options:remote_ip={1} options:key={2}'.format(port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=gre options:remote_ip={2} ' \
'options:key={3}'.format(br, port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def port_create_vxlan(br, port, id, remote, dst_port=None):
    '''
    Virtual eXtensible Local Area Network - creates VXLAN tunnel between endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 64-bit number, tunnel's key.
        remote: A string - remote endpoint's IP address.
        dst_port: An integer - port to use when creating tunnelport in the
            switch. None or an out-of-range value keeps the switch default.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_create_vxlan br0 vx1 5001 192.168.1.10 8472
    '''
    # Guard against dst_port=None (the default): comparing None to an int
    # raises TypeError on Python 3, so test for None before the range check.
    if dst_port is not None and 0 < dst_port <= 65535:
        dst_port = ' options:dst_port=' + six.text_type(dst_port)
    else:
        dst_port = ''
    if not 0 <= id < 2**64:
        return False
    elif not __salt__['dig.check_ip'](remote):
        return False
    elif not bridge_exists(br):
        return False
    elif port in port_list(br):
        # Port already attached to the bridge: just (re)configure it.
        cmd = 'ovs-vsctl set interface {0} type=vxlan options:remote_ip={1} ' \
              'options:key={2}{3}'.format(port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
    else:
        cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=vxlan options:remote_ip={2} ' \
              'options:key={3}{4}'.format(br, port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
def db_get(table, record, column, if_exists=False):
'''
Gets a column's value for a specific record.
Args:
table: A string - name of the database table.
record: A string - identifier of the record.
column: A string - name of the column.
if_exists: A boolean - if True, it is not an error if the record does
not exist.
Returns:
The column's value.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.db_get Port br0 vlan_mode
'''
cmd = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]
if if_exists:
cmd += ['--if-exists']
cmd += ['list', table, record]
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
raise CommandExecutionError(result['stderr'])
output = _stdout_parse_json(result['stdout'])
if output['data'] and output['data'][0]:
return output['data'][0][0]
else:
return None
def db_set(table, record, column, value, if_exists=False):
'''
Sets a column's value for a specific record.
Args:
table: A string - name of the database table.
record: A string - identifier of the record.
column: A string - name of the column.
value: A string - the value to be set
if_exists: A boolean - if True, it is not an error if the record does
not exist.
Returns:
None on success and an error message on failure.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
'''
cmd = ['ovs-vsctl']
if if_exists:
cmd += ['--if-exists']
cmd += ['set', table, record, '{0}={1}'.format(column, json.dumps(value))]
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return result['stderr']
else:
return None
|
saltstack/salt
|
salt/modules/openvswitch.py
|
port_list
|
python
|
def port_list(br):
'''
Lists all of the ports within bridge.
Args:
br: A string - bridge name.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_list br0
'''
cmd = 'ovs-vsctl list-ports {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
|
Lists all of the ports within bridge.
Args:
br: A string - bridge name.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_list br0
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L360-L381
|
[
"def _stdout_list_split(retcode, stdout='', splitstring='\\n'):\n '''\n Evaulates Open vSwitch command`s retcode value.\n\n Args:\n retcode: Value of retcode field from response, should be 0, 1 or 2.\n stdout: Value of stdout filed from response.\n splitstring: String used to split the stdout default new line.\n\n Returns:\n List or False.\n '''\n if retcode == 0:\n ret = stdout.split(splitstring)\n return ret\n else:\n return False\n"
] |
# -*- coding: utf-8 -*-
'''
Support for Open vSwitch - module with basic Open vSwitch commands.
Suitable for setting up Openstack Neutron.
:codeauthor: Jiri Kotlin <jiri.kotlin@ultimum.io>
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
from salt.ext import six
from salt.exceptions import ArgumentValueError, CommandExecutionError
from salt.utils import json
import salt.utils.path
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load the module if Open vSwitch is installed
'''
if salt.utils.path.which('ovs-vsctl'):
return 'openvswitch'
return False
def _param_may_exist(may_exist):
'''
Returns --may-exist parameter for Open vSwitch command.
Args:
may_exist: Boolean whether to use this parameter.
Returns:
String '--may-exist ' or empty string.
'''
if may_exist:
return '--may-exist '
else:
return ''
def _param_if_exists(if_exists):
'''
Returns --if-exist parameter for Open vSwitch command.
Args:
if_exists: Boolean whether to use this parameter.
Returns:
String '--if-exist ' or empty string.
'''
if if_exists:
return '--if-exists '
else:
return ''
def _retcode_to_bool(retcode):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
Returns:
True on 0, else False
'''
if retcode == 0:
return True
else:
return False
def _stdout_list_split(retcode, stdout='', splitstring='\n'):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
stdout: Value of stdout filed from response.
splitstring: String used to split the stdout default new line.
Returns:
List or False.
'''
if retcode == 0:
ret = stdout.split(splitstring)
return ret
else:
return False
def _convert_json(obj):
'''
Converts from the JSON output provided by ovs-vsctl into a usable Python
object tree. In particular, sets and maps are converted from lists to
actual sets or maps.
Args:
obj: Object that shall be recursively converted.
Returns:
Converted version of object.
'''
if isinstance(obj, dict):
return {_convert_json(key): _convert_json(val)
for (key, val) in six.iteritems(obj)}
elif isinstance(obj, list) and len(obj) == 2:
first = obj[0]
second = obj[1]
if first == 'set' and isinstance(second, list):
return [_convert_json(elem) for elem in second]
elif first == 'map' and isinstance(second, list):
for elem in second:
if not isinstance(elem, list) or len(elem) != 2:
return obj
return {elem[0]: _convert_json(elem[1]) for elem in second}
else:
return obj
elif isinstance(obj, list):
return [_convert_json(elem) for elem in obj]
else:
return obj
def _stdout_parse_json(stdout):
    '''
    Parses JSON output from ovs-vsctl and returns the corresponding object
    tree.

    Args:
        stdout: Output that shall be parsed.

    Returns:
        Object represented by the output, with ovs-vsctl's tagged set/map
        encodings unwrapped by _convert_json.
    '''
    return _convert_json(json.loads(stdout))
def bridge_list():
'''
Lists all existing real and fake bridges.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_list
'''
cmd = 'ovs-vsctl list-br'
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def bridge_exists(br):
'''
Tests whether bridge exists as a real or fake bridge.
Returns:
True if Bridge exists, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_exists br0
'''
cmd = 'ovs-vsctl br-exists {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_create(br, may_exist=True, parent=None, vlan=None):
'''
Creates a new bridge.
Args:
br: A string - bridge name
may_exist: Bool, if False - attempting to create a bridge that exists returns False.
parent: String, the name of the parent bridge (if the bridge shall be
created as a fake bridge). If specified, vlan must also be
specified.
vlan: Int, the VLAN ID of the bridge (if the bridge shall be created as
a fake bridge). If specified, parent must also be specified.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_create br0
'''
param_may_exist = _param_may_exist(may_exist)
if parent is not None and vlan is None:
raise ArgumentValueError(
'If parent is specified, vlan must also be specified.')
if vlan is not None and parent is None:
raise ArgumentValueError(
'If vlan is specified, parent must also be specified.')
param_parent = '' if parent is None else ' {0}'.format(parent)
param_vlan = '' if vlan is None else ' {0}'.format(vlan)
cmd = 'ovs-vsctl {1}add-br {0}{2}{3}'.format(br, param_may_exist, param_parent,
param_vlan)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def bridge_delete(br, if_exists=True):
'''
Deletes bridge and all of its ports.
Args:
br: A string - bridge name
if_exists: Bool, if False - attempting to delete a bridge that does not exist returns False.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_delete br0
'''
param_if_exists = _param_if_exists(if_exists)
cmd = 'ovs-vsctl {1}del-br {0}'.format(br, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_to_parent(br):
'''
Returns the parent bridge of a bridge.
Args:
br: A string - bridge name
Returns:
Name of the parent bridge. This is the same as the bridge name if the
bridge is not a fake bridge. If the bridge does not exist, False is
returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-parent {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return result['stdout']
def bridge_to_vlan(br):
    '''
    Returns the VLAN ID of a bridge.

    Args:
        br: A string - bridge name

    Returns:
        VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake
        bridge. If the bridge does not exist, False is returned.

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_to_vlan br0
    '''
    cmd = 'ovs-vsctl br-to-vlan {0}'.format(br)
    result = __salt__['cmd.run_all'](cmd)
    if result['retcode'] != 0:
        # Non-zero retcode from ovs-vsctl: the bridge does not exist.
        return False
    # ovs-vsctl prints the VLAN ID (0 for a real bridge) on stdout.
    return int(result['stdout'])
def port_add(br, port, may_exist=False, internal=False):
'''
Creates on bridge a new port named port.
Returns:
True on success, else False.
Args:
br: A string - bridge name
port: A string - port name
may_exist: Bool, if False - attempting to create a port that exists returns False.
internal: A boolean to create an internal interface if one does not exist.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_add br0 8080
'''
param_may_exist = _param_may_exist(may_exist)
cmd = 'ovs-vsctl {2}add-port {0} {1}'.format(br, port, param_may_exist)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def port_remove(br, port, if_exists=True):
    '''
    Deletes port.

    Args:
        br: A string - bridge name (If bridge is None, port is removed from
            whatever bridge contains it)
        port: A string - port name.
        if_exists: Bool, if False - attempting to delete a port that does not
            exist returns False. (Default True)

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_remove br0 8080
    '''
    param_if_exists = _param_if_exists(if_exists)
    if port and not br:
        # No bridge given: let ovs-vsctl locate the bridge that owns the port.
        cmd = 'ovs-vsctl {1}del-port {0}'.format(port, param_if_exists)
    else:
        cmd = 'ovs-vsctl {2}del-port {0} {1}'.format(br, port, param_if_exists)
    result = __salt__['cmd.run_all'](cmd)
    retcode = result['retcode']
    return _retcode_to_bool(retcode)
def port_get_tag(port):
'''
Lists tags of the port.
Args:
port: A string - port name.
Returns:
List of tags (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_get_tag tap0
'''
cmd = 'ovs-vsctl get port {0} tag'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def interface_get_options(port):
'''
Port's interface's optional parameters.
Args:
port: A string - port name.
Returns:
String containing optional parameters of port's interface, False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.interface_get_options tap0
'''
cmd = 'ovs-vsctl get interface {0} options'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def interface_get_type(port):
'''
Type of port's interface.
Args:
port: A string - port name.
Returns:
String - type of interface or empty string, False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.interface_get_type tap0
'''
cmd = 'ovs-vsctl get interface {0} type'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def port_create_vlan(br, port, id, internal=False):
'''
Isolate VM traffic using VLANs.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer in the valid range 0 to 4095 (inclusive), name of VLAN.
internal: A boolean to create an internal interface if one does not exist.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_vlan br0 tap0 100
'''
interfaces = __salt__['network.interfaces']()
if not 0 <= id <= 4095:
return False
elif not bridge_exists(br):
return False
elif not internal and port not in interfaces:
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set port {0} tag={1}'.format(port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} tag={2}'.format(br, port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def port_create_gre(br, port, id, remote):
'''
Generic Routing Encapsulation - creates GRE tunnel between endpoints.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer - unsigned 32-bit number, tunnel's key.
remote: A string - remote endpoint's IP address.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
'''
if not 0 <= id < 2**32:
return False
elif not __salt__['dig.check_ip'](remote):
return False
elif not bridge_exists(br):
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set interface {0} type=gre options:remote_ip={1} options:key={2}'.format(port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=gre options:remote_ip={2} ' \
'options:key={3}'.format(br, port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def port_create_vxlan(br, port, id, remote, dst_port=None):
    '''
    Virtual eXtensible Local Area Network - creates VXLAN tunnel between endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 64-bit number, tunnel's key.
        remote: A string - remote endpoint's IP address.
        dst_port: An integer - port to use when creating tunnelport in the
            switch. None or an out-of-range value keeps the switch default.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_create_vxlan br0 vx1 5001 192.168.1.10 8472
    '''
    # Guard against dst_port=None (the default): comparing None to an int
    # raises TypeError on Python 3, so test for None before the range check.
    if dst_port is not None and 0 < dst_port <= 65535:
        dst_port = ' options:dst_port=' + six.text_type(dst_port)
    else:
        dst_port = ''
    if not 0 <= id < 2**64:
        return False
    elif not __salt__['dig.check_ip'](remote):
        return False
    elif not bridge_exists(br):
        return False
    elif port in port_list(br):
        # Port already attached to the bridge: just (re)configure it.
        cmd = 'ovs-vsctl set interface {0} type=vxlan options:remote_ip={1} ' \
              'options:key={2}{3}'.format(port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
    else:
        cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=vxlan options:remote_ip={2} ' \
              'options:key={3}{4}'.format(br, port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
def db_get(table, record, column, if_exists=False):
    '''
    Gets a column's value for a specific record.

    Args:
        table: A string - name of the database table.
        record: A string - identifier of the record.
        column: A string - name of the column.
        if_exists: A boolean - if True, it is not an error if the record does
            not exist.

    Returns:
        The column's value, or None when no value is present.

    Raises:
        CommandExecutionError: if ovs-vsctl exits non-zero.

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.db_get Port br0 vlan_mode
    '''
    # Request JSON output so the cell value can be parsed reliably.
    cmd = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]
    if if_exists:
        cmd += ['--if-exists']
    cmd += ['list', table, record]
    result = __salt__['cmd.run_all'](cmd)
    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])
    output = _stdout_parse_json(result['stdout'])
    # 'data' holds the result rows; each row has a single cell because only
    # one column was requested. Missing record (with --if-exists) yields None.
    if output['data'] and output['data'][0]:
        return output['data'][0][0]
    else:
        return None
def db_set(table, record, column, value, if_exists=False):
'''
Sets a column's value for a specific record.
Args:
table: A string - name of the database table.
record: A string - identifier of the record.
column: A string - name of the column.
value: A string - the value to be set
if_exists: A boolean - if True, it is not an error if the record does
not exist.
Returns:
None on success and an error message on failure.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
'''
cmd = ['ovs-vsctl']
if if_exists:
cmd += ['--if-exists']
cmd += ['set', table, record, '{0}={1}'.format(column, json.dumps(value))]
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return result['stderr']
else:
return None
|
saltstack/salt
|
salt/modules/openvswitch.py
|
port_get_tag
|
python
|
def port_get_tag(port):
'''
Lists tags of the port.
Args:
port: A string - port name.
Returns:
List of tags (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_get_tag tap0
'''
cmd = 'ovs-vsctl get port {0} tag'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
|
Lists tags of the port.
Args:
port: A string - port name.
Returns:
List of tags (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_get_tag tap0
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L384-L405
|
[
"def _stdout_list_split(retcode, stdout='', splitstring='\\n'):\n '''\n Evaulates Open vSwitch command`s retcode value.\n\n Args:\n retcode: Value of retcode field from response, should be 0, 1 or 2.\n stdout: Value of stdout filed from response.\n splitstring: String used to split the stdout default new line.\n\n Returns:\n List or False.\n '''\n if retcode == 0:\n ret = stdout.split(splitstring)\n return ret\n else:\n return False\n"
] |
# -*- coding: utf-8 -*-
'''
Support for Open vSwitch - module with basic Open vSwitch commands.
Suitable for setting up Openstack Neutron.
:codeauthor: Jiri Kotlin <jiri.kotlin@ultimum.io>
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
from salt.ext import six
from salt.exceptions import ArgumentValueError, CommandExecutionError
from salt.utils import json
import salt.utils.path
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load the module if Open vSwitch is installed
'''
if salt.utils.path.which('ovs-vsctl'):
return 'openvswitch'
return False
def _param_may_exist(may_exist):
'''
Returns --may-exist parameter for Open vSwitch command.
Args:
may_exist: Boolean whether to use this parameter.
Returns:
String '--may-exist ' or empty string.
'''
if may_exist:
return '--may-exist '
else:
return ''
def _param_if_exists(if_exists):
'''
Returns --if-exist parameter for Open vSwitch command.
Args:
if_exists: Boolean whether to use this parameter.
Returns:
String '--if-exist ' or empty string.
'''
if if_exists:
return '--if-exists '
else:
return ''
def _retcode_to_bool(retcode):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
Returns:
True on 0, else False
'''
if retcode == 0:
return True
else:
return False
def _stdout_list_split(retcode, stdout='', splitstring='\n'):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
stdout: Value of stdout filed from response.
splitstring: String used to split the stdout default new line.
Returns:
List or False.
'''
if retcode == 0:
ret = stdout.split(splitstring)
return ret
else:
return False
def _convert_json(obj):
'''
Converts from the JSON output provided by ovs-vsctl into a usable Python
object tree. In particular, sets and maps are converted from lists to
actual sets or maps.
Args:
obj: Object that shall be recursively converted.
Returns:
Converted version of object.
'''
if isinstance(obj, dict):
return {_convert_json(key): _convert_json(val)
for (key, val) in six.iteritems(obj)}
elif isinstance(obj, list) and len(obj) == 2:
first = obj[0]
second = obj[1]
if first == 'set' and isinstance(second, list):
return [_convert_json(elem) for elem in second]
elif first == 'map' and isinstance(second, list):
for elem in second:
if not isinstance(elem, list) or len(elem) != 2:
return obj
return {elem[0]: _convert_json(elem[1]) for elem in second}
else:
return obj
elif isinstance(obj, list):
return [_convert_json(elem) for elem in obj]
else:
return obj
def _stdout_parse_json(stdout):
'''
Parses JSON output from ovs-vsctl and returns the corresponding object
tree.
Args:
stdout: Output that shall be parsed.
Returns:
Object represented by the output.
'''
obj = json.loads(stdout)
return _convert_json(obj)
def bridge_list():
'''
Lists all existing real and fake bridges.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_list
'''
cmd = 'ovs-vsctl list-br'
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def bridge_exists(br):
'''
Tests whether bridge exists as a real or fake bridge.
Returns:
True if Bridge exists, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_exists br0
'''
cmd = 'ovs-vsctl br-exists {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_create(br, may_exist=True, parent=None, vlan=None):
'''
Creates a new bridge.
Args:
br: A string - bridge name
may_exist: Bool, if False - attempting to create a bridge that exists returns False.
parent: String, the name of the parent bridge (if the bridge shall be
created as a fake bridge). If specified, vlan must also be
specified.
vlan: Int, the VLAN ID of the bridge (if the bridge shall be created as
a fake bridge). If specified, parent must also be specified.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_create br0
'''
param_may_exist = _param_may_exist(may_exist)
if parent is not None and vlan is None:
raise ArgumentValueError(
'If parent is specified, vlan must also be specified.')
if vlan is not None and parent is None:
raise ArgumentValueError(
'If vlan is specified, parent must also be specified.')
param_parent = '' if parent is None else ' {0}'.format(parent)
param_vlan = '' if vlan is None else ' {0}'.format(vlan)
cmd = 'ovs-vsctl {1}add-br {0}{2}{3}'.format(br, param_may_exist, param_parent,
param_vlan)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def bridge_delete(br, if_exists=True):
'''
Deletes bridge and all of its ports.
Args:
br: A string - bridge name
if_exists: Bool, if False - attempting to delete a bridge that does not exist returns False.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_delete br0
'''
param_if_exists = _param_if_exists(if_exists)
cmd = 'ovs-vsctl {1}del-br {0}'.format(br, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_to_parent(br):
'''
Returns the parent bridge of a bridge.
Args:
br: A string - bridge name
Returns:
Name of the parent bridge. This is the same as the bridge name if the
bridge is not a fake bridge. If the bridge does not exist, False is
returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-parent {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return result['stdout']
def bridge_to_vlan(br):
    '''
    Returns the VLAN ID of a bridge.

    Args:
        br: A string - bridge name

    Returns:
        VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake
        bridge. If the bridge does not exist, False is returned.

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_to_vlan br0
    '''
    cmd = 'ovs-vsctl br-to-vlan {0}'.format(br)
    result = __salt__['cmd.run_all'](cmd)
    if result['retcode'] != 0:
        # Non-zero retcode from ovs-vsctl: the bridge does not exist.
        return False
    # ovs-vsctl prints the VLAN ID (0 for a real bridge) on stdout.
    return int(result['stdout'])
def port_add(br, port, may_exist=False, internal=False):
'''
Creates on bridge a new port named port.
Returns:
True on success, else False.
Args:
br: A string - bridge name
port: A string - port name
may_exist: Bool, if False - attempting to create a port that exists returns False.
internal: A boolean to create an internal interface if one does not exist.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_add br0 8080
'''
param_may_exist = _param_may_exist(may_exist)
cmd = 'ovs-vsctl {2}add-port {0} {1}'.format(br, port, param_may_exist)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def port_remove(br, port, if_exists=True):
'''
Deletes port.
Args:
br: A string - bridge name (If bridge is None, port is removed from whatever bridge contains it)
port: A string - port name.
if_exists: Bool, if False - attempting to delete a por that does not exist returns False. (Default True)
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_remove br0 8080
'''
param_if_exists = _param_if_exists(if_exists)
if port and not br:
cmd = 'ovs-vsctl {1}del-port {0}'.format(port, param_if_exists)
else:
cmd = 'ovs-vsctl {2}del-port {0} {1}'.format(br, port, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def port_list(br):
    '''
    Lists all of the ports within bridge.

    Args:
        br: A string - bridge name.

    Returns:
        List of ports (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_list br0
    '''
    cmd = 'ovs-vsctl list-ports {0}'.format(br)
    result = __salt__['cmd.run_all'](cmd)
    retcode = result['retcode']
    stdout = result['stdout']
    # Split newline-separated port names; returns False if the command failed.
    return _stdout_list_split(retcode, stdout)
def interface_get_options(port):
'''
Port's interface's optional parameters.
Args:
port: A string - port name.
Returns:
String containing optional parameters of port's interface, False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.interface_get_options tap0
'''
cmd = 'ovs-vsctl get interface {0} options'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def interface_get_type(port):
    '''
    Type of port's interface.

    Args:
        port: A string - port name.

    Returns:
        String - type of interface or empty string, False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.interface_get_type tap0
    '''
    cmd = 'ovs-vsctl get interface {0} type'.format(port)
    result = __salt__['cmd.run_all'](cmd)
    retcode = result['retcode']
    stdout = result['stdout']
    return _stdout_list_split(retcode, stdout)
def port_create_vlan(br, port, id, internal=False):
    '''
    Isolate VM traffic using VLANs.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer in the valid range 0 to 4095 (inclusive), name of VLAN.
        internal: A boolean to create an internal interface if one does not
            exist.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_create_vlan br0 tap0 100
    '''
    interfaces = __salt__['network.interfaces']()
    # Guard clauses: VLAN id range, bridge existence, and (for non-internal
    # ports) presence of the interface on the system.
    if not 0 <= id <= 4095:
        return False
    if not bridge_exists(br):
        return False
    if not internal and port not in interfaces:
        return False
    # Existing ports get re-tagged; new ports are added with the tag.
    if port in port_list(br):
        cmd = 'ovs-vsctl set port {0} tag={1}'.format(port, id)
    else:
        cmd = 'ovs-vsctl add-port {0} {1} tag={2}'.format(br, port, id)
    if internal:
        cmd += ' -- set interface {0} type=internal'.format(port)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def port_create_gre(br, port, id, remote):
    '''
    Generic Routing Encapsulation - creates GRE tunnel between endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 32-bit number, tunnel's key.
        remote: A string - remote endpoint's IP address.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
    '''
    # Validate key range, remote address and bridge before shelling out.
    if not 0 <= id < 2**32:
        return False
    elif not __salt__['dig.check_ip'](remote):
        return False
    elif not bridge_exists(br):
        return False
    elif port in port_list(br):
        # Port already attached: just (re)configure the tunnel interface.
        cmd = 'ovs-vsctl set interface {0} type=gre options:remote_ip={1} options:key={2}'.format(port, remote, id)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
    else:
        # Add the port and configure the GRE interface in one transaction.
        cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=gre options:remote_ip={2} ' \
              'options:key={3}'.format(br, port, remote, id)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
def port_create_vxlan(br, port, id, remote, dst_port=None):
    '''
    Virtual eXtensible Local Area Network - creates VXLAN tunnel between
    endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 64-bit number, tunnel's key.
        remote: A string - remote endpoint's IP address.
        dst_port: An integer - port to use when creating tunnelport in the
            switch. Optional; when omitted the switch default is used.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_create_vxlan br0 vx1 5001 192.168.1.10 8472
    '''
    # Bug fix: the default dst_port=None used to be compared with integers
    # (``0 < dst_port <= 65535``), which raises TypeError on Python 3.
    # Only build the option suffix when a valid port number was supplied.
    if dst_port is not None and 0 < dst_port <= 65535:
        dst_port = ' options:dst_port=' + six.text_type(dst_port)
    else:
        dst_port = ''
    if not 0 <= id < 2**64:
        return False
    elif not __salt__['dig.check_ip'](remote):
        return False
    elif not bridge_exists(br):
        return False
    elif port in port_list(br):
        # Port already attached: just (re)configure the tunnel interface.
        cmd = 'ovs-vsctl set interface {0} type=vxlan options:remote_ip={1} ' \
              'options:key={2}{3}'.format(port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
    else:
        # Add the port and configure the VXLAN interface in one transaction.
        cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=vxlan options:remote_ip={2} ' \
              'options:key={3}{4}'.format(br, port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
def db_get(table, record, column, if_exists=False):
    '''
    Gets a column's value for a specific record.

    Args:
        table: A string - name of the database table.
        record: A string - identifier of the record.
        column: A string - name of the column.
        if_exists: A boolean - if True, it is not an error if the record does
            not exist.

    Returns:
        The column's value, or None when no matching row/value was found.

    Raises:
        CommandExecutionError: if ovs-vsctl exits non-zero.

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.db_get Port br0 vlan_mode
    '''
    # Argument list (not a shell string) avoids any quoting issues.
    cmd = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]
    if if_exists:
        cmd += ['--if-exists']
    cmd += ['list', table, record]
    result = __salt__['cmd.run_all'](cmd)
    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])
    output = _stdout_parse_json(result['stdout'])
    # JSON shape: {'headings': [...], 'data': [[value], ...]}; the single
    # requested column of the single requested record is data[0][0].
    if output['data'] and output['data'][0]:
        return output['data'][0][0]
    else:
        return None
def db_set(table, record, column, value, if_exists=False):
    '''
    Sets a column's value for a specific record.

    Args:
        table: A string - name of the database table.
        record: A string - identifier of the record.
        column: A string - name of the column.
        value: A string - the value to be set
        if_exists: A boolean - if True, it is not an error if the record does
            not exist.

    Returns:
        None on success and an error message on failure.

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
    '''
    # Argument list (not a shell string) avoids any quoting issues; the value
    # is JSON-encoded so ovsdb receives a properly typed/quoted literal.
    cmd = ['ovs-vsctl']
    if if_exists:
        cmd += ['--if-exists']
    cmd += ['set', table, record, '{0}={1}'.format(column, json.dumps(value))]
    result = __salt__['cmd.run_all'](cmd)
    if result['retcode'] != 0:
        return result['stderr']
    else:
        return None
|
saltstack/salt
|
salt/modules/openvswitch.py
|
port_create_vlan
|
python
|
def port_create_vlan(br, port, id, internal=False):
'''
Isolate VM traffic using VLANs.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer in the valid range 0 to 4095 (inclusive), name of VLAN.
internal: A boolean to create an internal interface if one does not exist.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_vlan br0 tap0 100
'''
interfaces = __salt__['network.interfaces']()
if not 0 <= id <= 4095:
return False
elif not bridge_exists(br):
return False
elif not internal and port not in interfaces:
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set port {0} tag={1}'.format(port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} tag={2}'.format(br, port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
|
Isolate VM traffic using VLANs.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer in the valid range 0 to 4095 (inclusive), name of VLAN.
internal: A boolean to create an internal interface if one does not exist.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_vlan br0 tap0 100
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L456-L494
|
[
"def _retcode_to_bool(retcode):\n '''\n Evaulates Open vSwitch command`s retcode value.\n\n Args:\n retcode: Value of retcode field from response, should be 0, 1 or 2.\n\n Returns:\n True on 0, else False\n '''\n if retcode == 0:\n return True\n else:\n return False\n",
"def bridge_exists(br):\n '''\n Tests whether bridge exists as a real or fake bridge.\n\n Returns:\n True if Bridge exists, else False.\n\n .. versionadded:: 2016.3.0\n\n CLI Example:\n .. code-block:: bash\n\n salt '*' openvswitch.bridge_exists br0\n '''\n cmd = 'ovs-vsctl br-exists {0}'.format(br)\n result = __salt__['cmd.run_all'](cmd)\n retcode = result['retcode']\n return _retcode_to_bool(retcode)\n",
"def port_list(br):\n '''\n Lists all of the ports within bridge.\n\n Args:\n br: A string - bridge name.\n\n Returns:\n List of bridges (or empty list), False on failure.\n\n .. versionadded:: 2016.3.0\n\n CLI Example:\n .. code-block:: bash\n\n salt '*' openvswitch.port_list br0\n '''\n cmd = 'ovs-vsctl list-ports {0}'.format(br)\n result = __salt__['cmd.run_all'](cmd)\n retcode = result['retcode']\n stdout = result['stdout']\n return _stdout_list_split(retcode, stdout)\n"
] |
# -*- coding: utf-8 -*-
'''
Support for Open vSwitch - module with basic Open vSwitch commands.
Suitable for setting up Openstack Neutron.
:codeauthor: Jiri Kotlin <jiri.kotlin@ultimum.io>
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
from salt.ext import six
from salt.exceptions import ArgumentValueError, CommandExecutionError
from salt.utils import json
import salt.utils.path
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load the module if Open vSwitch is installed
    (i.e. the ``ovs-vsctl`` binary is on the PATH).
    '''
    if not salt.utils.path.which('ovs-vsctl'):
        return False
    return 'openvswitch'
def _param_may_exist(may_exist):
'''
Returns --may-exist parameter for Open vSwitch command.
Args:
may_exist: Boolean whether to use this parameter.
Returns:
String '--may-exist ' or empty string.
'''
if may_exist:
return '--may-exist '
else:
return ''
def _param_if_exists(if_exists):
'''
Returns --if-exist parameter for Open vSwitch command.
Args:
if_exists: Boolean whether to use this parameter.
Returns:
String '--if-exist ' or empty string.
'''
if if_exists:
return '--if-exists '
else:
return ''
def _retcode_to_bool(retcode):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
Returns:
True on 0, else False
'''
if retcode == 0:
return True
else:
return False
def _stdout_list_split(retcode, stdout='', splitstring='\n'):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
stdout: Value of stdout filed from response.
splitstring: String used to split the stdout default new line.
Returns:
List or False.
'''
if retcode == 0:
ret = stdout.split(splitstring)
return ret
else:
return False
def _convert_json(obj):
'''
Converts from the JSON output provided by ovs-vsctl into a usable Python
object tree. In particular, sets and maps are converted from lists to
actual sets or maps.
Args:
obj: Object that shall be recursively converted.
Returns:
Converted version of object.
'''
if isinstance(obj, dict):
return {_convert_json(key): _convert_json(val)
for (key, val) in six.iteritems(obj)}
elif isinstance(obj, list) and len(obj) == 2:
first = obj[0]
second = obj[1]
if first == 'set' and isinstance(second, list):
return [_convert_json(elem) for elem in second]
elif first == 'map' and isinstance(second, list):
for elem in second:
if not isinstance(elem, list) or len(elem) != 2:
return obj
return {elem[0]: _convert_json(elem[1]) for elem in second}
else:
return obj
elif isinstance(obj, list):
return [_convert_json(elem) for elem in obj]
else:
return obj
def _stdout_parse_json(stdout):
    '''
    Parses JSON output from ovs-vsctl and returns the corresponding object
    tree (with ovsdb 'set'/'map' wrappers unwrapped).

    Args:
        stdout: Output that shall be parsed.

    Returns:
        Object represented by the output.
    '''
    return _convert_json(json.loads(stdout))
def bridge_list():
    '''
    Lists all existing real and fake bridges.

    Returns:
        List of bridges (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_list
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl list-br')
    return _stdout_list_split(result['retcode'], result['stdout'])
def bridge_exists(br):
    '''
    Tests whether bridge exists as a real or fake bridge.

    Args:
        br: A string - bridge name.

    Returns:
        True if Bridge exists, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_exists br0
    '''
    # ``br-exists`` communicates existence solely through its exit status.
    result = __salt__['cmd.run_all']('ovs-vsctl br-exists {0}'.format(br))
    return _retcode_to_bool(result['retcode'])
def bridge_create(br, may_exist=True, parent=None, vlan=None):
    '''
    Creates a new bridge.

    Args:
        br: A string - bridge name
        may_exist: Bool, if False - attempting to create a bridge that exists
            returns False.
        parent: String, the name of the parent bridge (if the bridge shall be
            created as a fake bridge). If specified, vlan must also be
            specified.
        vlan: Int, the VLAN ID of the bridge (if the bridge shall be created
            as a fake bridge). If specified, parent must also be specified.

    Returns:
        True on success, else False.

    Raises:
        ArgumentValueError: if only one of parent/vlan is given.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_create br0
    '''
    # parent and vlan are only meaningful together (fake bridge creation).
    if parent is not None and vlan is None:
        raise ArgumentValueError(
            'If parent is specified, vlan must also be specified.')
    if vlan is not None and parent is None:
        raise ArgumentValueError(
            'If vlan is specified, parent must also be specified.')
    cmd = 'ovs-vsctl {0}add-br {1}'.format(_param_may_exist(may_exist), br)
    if parent is not None:
        # Both validated above: append fake-bridge arguments.
        cmd += ' {0} {1}'.format(parent, vlan)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def bridge_delete(br, if_exists=True):
    '''
    Deletes bridge and all of its ports.

    Args:
        br: A string - bridge name
        if_exists: Bool, if False - attempting to delete a bridge that does
            not exist returns False.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_delete br0
    '''
    cmd = 'ovs-vsctl {0}del-br {1}'.format(_param_if_exists(if_exists), br)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def bridge_to_parent(br):
    '''
    Returns the parent bridge of a bridge.

    Args:
        br: A string - bridge name

    Returns:
        Name of the parent bridge. This is the same as the bridge name if the
        bridge is not a fake bridge. If the bridge does not exist, False is
        returned.

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_to_parent br0
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl br-to-parent {0}'.format(br))
    if result['retcode'] != 0:
        # Non-zero exit means the bridge does not exist.
        return False
    return result['stdout']
def bridge_to_vlan(br):
    '''
    Returns the VLAN ID of a bridge.

    Args:
        br: A string - bridge name

    Returns:
        VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake
        bridge. If the bridge does not exist, False is returned.

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_to_vlan br0
    '''
    cmd = 'ovs-vsctl br-to-vlan {0}'.format(br)
    result = __salt__['cmd.run_all'](cmd)
    # Non-zero exit means the bridge does not exist.
    if result['retcode'] != 0:
        return False
    return int(result['stdout'])
def port_add(br, port, may_exist=False, internal=False):
    '''
    Creates on bridge a new port named port.

    Args:
        br: A string - bridge name
        port: A string - port name
        may_exist: Bool, if False - attempting to create a port that exists
            returns False.
        internal: A boolean to create an internal interface if one does not
            exist.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_add br0 8080
    '''
    cmd = 'ovs-vsctl {0}add-port {1} {2}'.format(
        _param_may_exist(may_exist), br, port)
    if internal:
        cmd += ' -- set interface {0} type=internal'.format(port)
    return _retcode_to_bool(__salt__['cmd.run_all'](cmd)['retcode'])
def port_remove(br, port, if_exists=True):
    '''
    Deletes port.

    Args:
        br: A string - bridge name. (If bridge is None, the port is removed
            from whatever bridge contains it.)
        port: A string - port name.
        if_exists: Bool, if False - attempting to delete a port that does not
            exist returns False. (Default True)

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_remove br0 8080
    '''
    flag = _param_if_exists(if_exists)
    # With no bridge given, let ovs-vsctl find the bridge that owns the port.
    if port and not br:
        cmd = 'ovs-vsctl {0}del-port {1}'.format(flag, port)
    else:
        cmd = 'ovs-vsctl {0}del-port {1} {2}'.format(flag, br, port)
    return _retcode_to_bool(__salt__['cmd.run_all'](cmd)['retcode'])
def port_list(br):
    '''
    Lists all of the ports within bridge.

    Args:
        br: A string - bridge name.

    Returns:
        List of ports (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_list br0
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl list-ports {0}'.format(br))
    return _stdout_list_split(result['retcode'], result['stdout'])
def port_get_tag(port):
    '''
    Lists tags of the port.

    Args:
        port: A string - port name.

    Returns:
        List of tags (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_get_tag tap0
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl get port {0} tag'.format(port))
    return _stdout_list_split(result['retcode'], result['stdout'])
def interface_get_options(port):
    '''
    Port's interface's optional parameters.

    Args:
        port: A string - port name.

    Returns:
        String containing optional parameters of port's interface,
        False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.interface_get_options tap0
    '''
    result = __salt__['cmd.run_all'](
        'ovs-vsctl get interface {0} options'.format(port))
    return _stdout_list_split(result['retcode'], result['stdout'])
def interface_get_type(port):
    '''
    Type of port's interface.

    Args:
        port: A string - port name.

    Returns:
        String - type of interface or empty string, False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.interface_get_type tap0
    '''
    result = __salt__['cmd.run_all'](
        'ovs-vsctl get interface {0} type'.format(port))
    return _stdout_list_split(result['retcode'], result['stdout'])
def port_create_gre(br, port, id, remote):
    '''
    Generic Routing Encapsulation - creates GRE tunnel between endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 32-bit number, tunnel's key.
        remote: A string - remote endpoint's IP address.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
    '''
    # Guard clauses: key range, remote address, bridge existence.
    if not 0 <= id < 2**32:
        return False
    if not __salt__['dig.check_ip'](remote):
        return False
    if not bridge_exists(br):
        return False
    if port in port_list(br):
        # Port already attached: just (re)configure the tunnel interface.
        cmd = 'ovs-vsctl set interface {0} type=gre options:remote_ip={1} options:key={2}'.format(port, remote, id)
    else:
        # Add the port and configure the GRE interface in one transaction.
        cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=gre options:remote_ip={2} ' \
              'options:key={3}'.format(br, port, remote, id)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def port_create_vxlan(br, port, id, remote, dst_port=None):
    '''
    Virtual eXtensible Local Area Network - creates VXLAN tunnel between
    endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 64-bit number, tunnel's key.
        remote: A string - remote endpoint's IP address.
        dst_port: An integer - port to use when creating tunnelport in the
            switch. Optional; when omitted the switch default is used.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_create_vxlan br0 vx1 5001 192.168.1.10 8472
    '''
    # Bug fix: the default dst_port=None used to be compared with integers
    # (``0 < dst_port <= 65535``), which raises TypeError on Python 3.
    # Only build the option suffix when a valid port number was supplied.
    if dst_port is not None and 0 < dst_port <= 65535:
        dst_port = ' options:dst_port=' + six.text_type(dst_port)
    else:
        dst_port = ''
    if not 0 <= id < 2**64:
        return False
    elif not __salt__['dig.check_ip'](remote):
        return False
    elif not bridge_exists(br):
        return False
    elif port in port_list(br):
        # Port already attached: just (re)configure the tunnel interface.
        cmd = 'ovs-vsctl set interface {0} type=vxlan options:remote_ip={1} ' \
              'options:key={2}{3}'.format(port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
    else:
        # Add the port and configure the VXLAN interface in one transaction.
        cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=vxlan options:remote_ip={2} ' \
              'options:key={3}{4}'.format(br, port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
def db_get(table, record, column, if_exists=False):
    '''
    Gets a column's value for a specific record.

    Args:
        table: A string - name of the database table.
        record: A string - identifier of the record.
        column: A string - name of the column.
        if_exists: A boolean - if True, it is not an error if the record does
            not exist.

    Returns:
        The column's value, or None when no matching row/value was found.

    Raises:
        CommandExecutionError: if ovs-vsctl exits non-zero.

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.db_get Port br0 vlan_mode
    '''
    # Argument list (not a shell string) avoids any quoting issues.
    cmd = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]
    if if_exists:
        cmd.append('--if-exists')
    cmd.extend(['list', table, record])
    result = __salt__['cmd.run_all'](cmd)
    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])
    output = _stdout_parse_json(result['stdout'])
    rows = output['data']
    # The single requested column of the single record is rows[0][0].
    if rows and rows[0]:
        return rows[0][0]
    return None
def db_set(table, record, column, value, if_exists=False):
    '''
    Sets a column's value for a specific record.

    Args:
        table: A string - name of the database table.
        record: A string - identifier of the record.
        column: A string - name of the column.
        value: A string - the value to be set
        if_exists: A boolean - if True, it is not an error if the record does
            not exist.

    Returns:
        None on success and an error message on failure.

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
    '''
    # Argument list (not a shell string) avoids any quoting issues; the value
    # is JSON-encoded so ovsdb receives a properly typed/quoted literal.
    cmd = ['ovs-vsctl']
    if if_exists:
        cmd.append('--if-exists')
    cmd.extend(
        ['set', table, record, '{0}={1}'.format(column, json.dumps(value))])
    result = __salt__['cmd.run_all'](cmd)
    if result['retcode'] == 0:
        return None
    return result['stderr']
|
saltstack/salt
|
salt/modules/openvswitch.py
|
port_create_gre
|
python
|
def port_create_gre(br, port, id, remote):
'''
Generic Routing Encapsulation - creates GRE tunnel between endpoints.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer - unsigned 32-bit number, tunnel's key.
remote: A string - remote endpoint's IP address.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
'''
if not 0 <= id < 2**32:
return False
elif not __salt__['dig.check_ip'](remote):
return False
elif not bridge_exists(br):
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set interface {0} type=gre options:remote_ip={1} options:key={2}'.format(port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=gre options:remote_ip={2} ' \
'options:key={3}'.format(br, port, remote, id)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
|
Generic Routing Encapsulation - creates GRE tunnel between endpoints.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer - unsigned 32-bit number, tunnel's key.
remote: A string - remote endpoint's IP address.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L497-L531
|
[
"def _retcode_to_bool(retcode):\n '''\n Evaulates Open vSwitch command`s retcode value.\n\n Args:\n retcode: Value of retcode field from response, should be 0, 1 or 2.\n\n Returns:\n True on 0, else False\n '''\n if retcode == 0:\n return True\n else:\n return False\n",
"def bridge_exists(br):\n '''\n Tests whether bridge exists as a real or fake bridge.\n\n Returns:\n True if Bridge exists, else False.\n\n .. versionadded:: 2016.3.0\n\n CLI Example:\n .. code-block:: bash\n\n salt '*' openvswitch.bridge_exists br0\n '''\n cmd = 'ovs-vsctl br-exists {0}'.format(br)\n result = __salt__['cmd.run_all'](cmd)\n retcode = result['retcode']\n return _retcode_to_bool(retcode)\n",
"def port_list(br):\n '''\n Lists all of the ports within bridge.\n\n Args:\n br: A string - bridge name.\n\n Returns:\n List of bridges (or empty list), False on failure.\n\n .. versionadded:: 2016.3.0\n\n CLI Example:\n .. code-block:: bash\n\n salt '*' openvswitch.port_list br0\n '''\n cmd = 'ovs-vsctl list-ports {0}'.format(br)\n result = __salt__['cmd.run_all'](cmd)\n retcode = result['retcode']\n stdout = result['stdout']\n return _stdout_list_split(retcode, stdout)\n"
] |
# -*- coding: utf-8 -*-
'''
Support for Open vSwitch - module with basic Open vSwitch commands.
Suitable for setting up Openstack Neutron.
:codeauthor: Jiri Kotlin <jiri.kotlin@ultimum.io>
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
from salt.ext import six
from salt.exceptions import ArgumentValueError, CommandExecutionError
from salt.utils import json
import salt.utils.path
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load the module if Open vSwitch is installed
'''
if salt.utils.path.which('ovs-vsctl'):
return 'openvswitch'
return False
def _param_may_exist(may_exist):
'''
Returns --may-exist parameter for Open vSwitch command.
Args:
may_exist: Boolean whether to use this parameter.
Returns:
String '--may-exist ' or empty string.
'''
if may_exist:
return '--may-exist '
else:
return ''
def _param_if_exists(if_exists):
'''
Returns --if-exist parameter for Open vSwitch command.
Args:
if_exists: Boolean whether to use this parameter.
Returns:
String '--if-exist ' or empty string.
'''
if if_exists:
return '--if-exists '
else:
return ''
def _retcode_to_bool(retcode):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
Returns:
True on 0, else False
'''
if retcode == 0:
return True
else:
return False
def _stdout_list_split(retcode, stdout='', splitstring='\n'):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
stdout: Value of stdout filed from response.
splitstring: String used to split the stdout default new line.
Returns:
List or False.
'''
if retcode == 0:
ret = stdout.split(splitstring)
return ret
else:
return False
def _convert_json(obj):
'''
Converts from the JSON output provided by ovs-vsctl into a usable Python
object tree. In particular, sets and maps are converted from lists to
actual sets or maps.
Args:
obj: Object that shall be recursively converted.
Returns:
Converted version of object.
'''
if isinstance(obj, dict):
return {_convert_json(key): _convert_json(val)
for (key, val) in six.iteritems(obj)}
elif isinstance(obj, list) and len(obj) == 2:
first = obj[0]
second = obj[1]
if first == 'set' and isinstance(second, list):
return [_convert_json(elem) for elem in second]
elif first == 'map' and isinstance(second, list):
for elem in second:
if not isinstance(elem, list) or len(elem) != 2:
return obj
return {elem[0]: _convert_json(elem[1]) for elem in second}
else:
return obj
elif isinstance(obj, list):
return [_convert_json(elem) for elem in obj]
else:
return obj
def _stdout_parse_json(stdout):
'''
Parses JSON output from ovs-vsctl and returns the corresponding object
tree.
Args:
stdout: Output that shall be parsed.
Returns:
Object represented by the output.
'''
obj = json.loads(stdout)
return _convert_json(obj)
def bridge_list():
'''
Lists all existing real and fake bridges.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_list
'''
cmd = 'ovs-vsctl list-br'
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def bridge_exists(br):
'''
Tests whether bridge exists as a real or fake bridge.
Returns:
True if Bridge exists, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_exists br0
'''
cmd = 'ovs-vsctl br-exists {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_create(br, may_exist=True, parent=None, vlan=None):
'''
Creates a new bridge.
Args:
br: A string - bridge name
may_exist: Bool, if False - attempting to create a bridge that exists returns False.
parent: String, the name of the parent bridge (if the bridge shall be
created as a fake bridge). If specified, vlan must also be
specified.
vlan: Int, the VLAN ID of the bridge (if the bridge shall be created as
a fake bridge). If specified, parent must also be specified.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_create br0
'''
param_may_exist = _param_may_exist(may_exist)
if parent is not None and vlan is None:
raise ArgumentValueError(
'If parent is specified, vlan must also be specified.')
if vlan is not None and parent is None:
raise ArgumentValueError(
'If vlan is specified, parent must also be specified.')
param_parent = '' if parent is None else ' {0}'.format(parent)
param_vlan = '' if vlan is None else ' {0}'.format(vlan)
cmd = 'ovs-vsctl {1}add-br {0}{2}{3}'.format(br, param_may_exist, param_parent,
param_vlan)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def bridge_delete(br, if_exists=True):
'''
Deletes bridge and all of its ports.
Args:
br: A string - bridge name
if_exists: Bool, if False - attempting to delete a bridge that does not exist returns False.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_delete br0
'''
param_if_exists = _param_if_exists(if_exists)
cmd = 'ovs-vsctl {1}del-br {0}'.format(br, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_to_parent(br):
'''
Returns the parent bridge of a bridge.
Args:
br: A string - bridge name
Returns:
Name of the parent bridge. This is the same as the bridge name if the
bridge is not a fake bridge. If the bridge does not exist, False is
returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-parent {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return result['stdout']
def bridge_to_vlan(br):
'''
Returns the VLAN ID of a bridge.
Args:
br: A string - bridge name
Returns:
VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake
bridge. If the bridge does not exist, False is returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-vlan {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return int(result['stdout'])
def port_add(br, port, may_exist=False, internal=False):
'''
Creates on bridge a new port named port.
Returns:
True on success, else False.
Args:
br: A string - bridge name
port: A string - port name
may_exist: Bool, if False - attempting to create a port that exists returns False.
internal: A boolean to create an internal interface if one does not exist.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_add br0 8080
'''
param_may_exist = _param_may_exist(may_exist)
cmd = 'ovs-vsctl {2}add-port {0} {1}'.format(br, port, param_may_exist)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def port_remove(br, port, if_exists=True):
    '''
    Deletes port.

    Args:
        br: A string - bridge name (If bridge is None, port is removed from
            whatever bridge contains it)
        port: A string - port name.
        if_exists: Bool, if False - attempting to delete a port that does not
            exist returns False. (Default True)

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_remove br0 8080
    '''
    prefix = _param_if_exists(if_exists)
    # Without a bridge name, ovs-vsctl locates the port on its own.
    if port and not br:
        cmd = 'ovs-vsctl {0}del-port {1}'.format(prefix, port)
    else:
        cmd = 'ovs-vsctl {0}del-port {1} {2}'.format(prefix, br, port)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def port_list(br):
    '''
    Lists all of the ports within bridge.

    Args:
        br: A string - bridge name.

    Returns:
        List of ports (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_list br0
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl list-ports {0}'.format(br))
    return _stdout_list_split(result['retcode'], result['stdout'])
def port_get_tag(port):
    '''
    Lists tags of the port.

    Args:
        port: A string - port name.

    Returns:
        List of tags (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_get_tag tap0
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl get port {0} tag'.format(port))
    return _stdout_list_split(result['retcode'], result['stdout'])
def interface_get_options(port):
    '''
    Port's interface's optional parameters.

    Args:
        port: A string - port name.

    Returns:
        String containing optional parameters of port's interface, False on
        failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.interface_get_options tap0
    '''
    result = __salt__['cmd.run_all'](
        'ovs-vsctl get interface {0} options'.format(port))
    return _stdout_list_split(result['retcode'], result['stdout'])
def interface_get_type(port):
    '''
    Type of port's interface.

    Args:
        port: A string - port name.

    Returns:
        String - type of interface or empty string, False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.interface_get_type tap0
    '''
    result = __salt__['cmd.run_all'](
        'ovs-vsctl get interface {0} type'.format(port))
    return _stdout_list_split(result['retcode'], result['stdout'])
def port_create_vlan(br, port, id, internal=False):
    '''
    Isolate VM traffic using VLANs.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer in the valid range 0 to 4095 (inclusive), name of VLAN.
        internal: A boolean to create an internal interface if one does not
            exist.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_create_vlan br0 tap0 100
    '''
    interfaces = __salt__['network.interfaces']()
    if not 0 <= id <= 4095:
        return False
    elif not bridge_exists(br):
        return False
    elif not internal and port not in interfaces:
        # Non-internal ports must already exist as system interfaces.
        return False
    # Build the command once instead of duplicating the internal-interface
    # suffix and the cmd.run_all call in both branches (behavior unchanged).
    if port in port_list(br):
        cmd = 'ovs-vsctl set port {0} tag={1}'.format(port, id)
    else:
        cmd = 'ovs-vsctl add-port {0} {1} tag={2}'.format(br, port, id)
    if internal:
        cmd += ' -- set interface {0} type=internal'.format(port)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def port_create_vxlan(br, port, id, remote, dst_port=None):
    '''
    Virtual eXtensible Local Area Network - creates VXLAN tunnel between endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 64-bit number, tunnel's key.
        remote: A string - remote endpoint's IP address.
        dst_port: An integer - port to use when creating tunnelport in the
            switch. If None or outside 1-65535, the option is omitted and the
            Open vSwitch default is used.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_create_vxlan br0 vx1 5001 192.168.1.10 8472
    '''
    # Guard dst_port=None explicitly: the previous unguarded comparison
    # ``0 < dst_port`` raises TypeError on Python 3 for the default None.
    if dst_port is not None and 0 < dst_port <= 65535:
        dst_port = ' options:dst_port=' + six.text_type(dst_port)
    else:
        dst_port = ''
    if not 0 <= id < 2**64:
        return False
    elif not __salt__['dig.check_ip'](remote):
        return False
    elif not bridge_exists(br):
        return False
    elif port in port_list(br):
        # Port already attached: just (re)configure the interface.
        cmd = 'ovs-vsctl set interface {0} type=vxlan options:remote_ip={1} ' \
              'options:key={2}{3}'.format(port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
    else:
        cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=vxlan options:remote_ip={2} ' \
              'options:key={3}{4}'.format(br, port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
def db_get(table, record, column, if_exists=False):
    '''
    Gets a column's value for a specific record.

    Args:
        table: A string - name of the database table.
        record: A string - identifier of the record.
        column: A string - name of the column.
        if_exists: A boolean - if True, it is not an error if the record does
            not exist.

    Returns:
        The column's value.

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.db_get Port br0 vlan_mode
    '''
    cmd = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]
    if if_exists:
        cmd.append('--if-exists')
    cmd.extend(['list', table, record])
    result = __salt__['cmd.run_all'](cmd)
    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])
    output = _stdout_parse_json(result['stdout'])
    if not (output['data'] and output['data'][0]):
        return None
    return output['data'][0][0]
def db_set(table, record, column, value, if_exists=False):
    '''
    Sets a column's value for a specific record.

    Args:
        table: A string - name of the database table.
        record: A string - identifier of the record.
        column: A string - name of the column.
        value: A string - the value to be set
        if_exists: A boolean - if True, it is not an error if the record does
            not exist.

    Returns:
        None on success and an error message on failure.

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
    '''
    cmd = ['ovs-vsctl']
    if if_exists:
        cmd.append('--if-exists')
    # JSON-encode the value so ovs-vsctl receives properly quoted data.
    cmd.extend(['set', table, record,
                '{0}={1}'.format(column, json.dumps(value))])
    result = __salt__['cmd.run_all'](cmd)
    return result['stderr'] if result['retcode'] != 0 else None
|
saltstack/salt
|
salt/modules/openvswitch.py
|
port_create_vxlan
|
python
|
def port_create_vxlan(br, port, id, remote, dst_port=None):
'''
Virtual eXtensible Local Area Network - creates VXLAN tunnel between endpoints.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer - unsigned 64-bit number, tunnel's key.
remote: A string - remote endpoint's IP address.
dst_port: An integer - port to use when creating tunnelport in the switch.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_vxlan br0 vx1 5001 192.168.1.10 8472
'''
dst_port = ' options:dst_port=' + six.text_type(dst_port) if 0 < dst_port <= 65535 else ''
if not 0 <= id < 2**64:
return False
elif not __salt__['dig.check_ip'](remote):
return False
elif not bridge_exists(br):
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set interface {0} type=vxlan options:remote_ip={1} ' \
'options:key={2}{3}'.format(port, remote, id, dst_port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=vxlan options:remote_ip={2} ' \
'options:key={3}{4}'.format(br, port, remote, id, dst_port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
|
Virtual eXtensible Local Area Network - creates VXLAN tunnel between endpoints.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer - unsigned 64-bit number, tunnel's key.
remote: A string - remote endpoint's IP address.
dst_port: An integer - port to use when creating tunnelport in the switch.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_vxlan br0 vx1 5001 192.168.1.10 8472
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L534-L571
|
[
"def _retcode_to_bool(retcode):\n '''\n Evaulates Open vSwitch command`s retcode value.\n\n Args:\n retcode: Value of retcode field from response, should be 0, 1 or 2.\n\n Returns:\n True on 0, else False\n '''\n if retcode == 0:\n return True\n else:\n return False\n",
"def bridge_exists(br):\n '''\n Tests whether bridge exists as a real or fake bridge.\n\n Returns:\n True if Bridge exists, else False.\n\n .. versionadded:: 2016.3.0\n\n CLI Example:\n .. code-block:: bash\n\n salt '*' openvswitch.bridge_exists br0\n '''\n cmd = 'ovs-vsctl br-exists {0}'.format(br)\n result = __salt__['cmd.run_all'](cmd)\n retcode = result['retcode']\n return _retcode_to_bool(retcode)\n",
"def port_list(br):\n '''\n Lists all of the ports within bridge.\n\n Args:\n br: A string - bridge name.\n\n Returns:\n List of bridges (or empty list), False on failure.\n\n .. versionadded:: 2016.3.0\n\n CLI Example:\n .. code-block:: bash\n\n salt '*' openvswitch.port_list br0\n '''\n cmd = 'ovs-vsctl list-ports {0}'.format(br)\n result = __salt__['cmd.run_all'](cmd)\n retcode = result['retcode']\n stdout = result['stdout']\n return _stdout_list_split(retcode, stdout)\n"
] |
# -*- coding: utf-8 -*-
'''
Support for Open vSwitch - module with basic Open vSwitch commands.
Suitable for setting up Openstack Neutron.
:codeauthor: Jiri Kotlin <jiri.kotlin@ultimum.io>
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
from salt.ext import six
from salt.exceptions import ArgumentValueError, CommandExecutionError
from salt.utils import json
import salt.utils.path
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load the module if Open vSwitch is installed
    '''
    return 'openvswitch' if salt.utils.path.which('ovs-vsctl') else False
def _param_may_exist(may_exist):
'''
Returns --may-exist parameter for Open vSwitch command.
Args:
may_exist: Boolean whether to use this parameter.
Returns:
String '--may-exist ' or empty string.
'''
if may_exist:
return '--may-exist '
else:
return ''
def _param_if_exists(if_exists):
'''
Returns --if-exist parameter for Open vSwitch command.
Args:
if_exists: Boolean whether to use this parameter.
Returns:
String '--if-exist ' or empty string.
'''
if if_exists:
return '--if-exists '
else:
return ''
def _retcode_to_bool(retcode):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
Returns:
True on 0, else False
'''
if retcode == 0:
return True
else:
return False
def _stdout_list_split(retcode, stdout='', splitstring='\n'):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
stdout: Value of stdout filed from response.
splitstring: String used to split the stdout default new line.
Returns:
List or False.
'''
if retcode == 0:
ret = stdout.split(splitstring)
return ret
else:
return False
def _convert_json(obj):
'''
Converts from the JSON output provided by ovs-vsctl into a usable Python
object tree. In particular, sets and maps are converted from lists to
actual sets or maps.
Args:
obj: Object that shall be recursively converted.
Returns:
Converted version of object.
'''
if isinstance(obj, dict):
return {_convert_json(key): _convert_json(val)
for (key, val) in six.iteritems(obj)}
elif isinstance(obj, list) and len(obj) == 2:
first = obj[0]
second = obj[1]
if first == 'set' and isinstance(second, list):
return [_convert_json(elem) for elem in second]
elif first == 'map' and isinstance(second, list):
for elem in second:
if not isinstance(elem, list) or len(elem) != 2:
return obj
return {elem[0]: _convert_json(elem[1]) for elem in second}
else:
return obj
elif isinstance(obj, list):
return [_convert_json(elem) for elem in obj]
else:
return obj
def _stdout_parse_json(stdout):
    '''
    Parses JSON output from ovs-vsctl and returns the corresponding object
    tree.

    Args:
        stdout: Output that shall be parsed.

    Returns:
        Object represented by the output.
    '''
    return _convert_json(json.loads(stdout))
def bridge_list():
    '''
    Lists all existing real and fake bridges.

    Returns:
        List of bridges (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_list
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl list-br')
    return _stdout_list_split(result['retcode'], result['stdout'])
def bridge_exists(br):
    '''
    Tests whether bridge exists as a real or fake bridge.

    Returns:
        True if Bridge exists, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_exists br0
    '''
    result = __salt__['cmd.run_all']('ovs-vsctl br-exists {0}'.format(br))
    return _retcode_to_bool(result['retcode'])
def bridge_create(br, may_exist=True, parent=None, vlan=None):
    '''
    Creates a new bridge.

    Args:
        br: A string - bridge name
        may_exist: Bool, if False - attempting to create a bridge that exists
            returns False.
        parent: String, the name of the parent bridge (if the bridge shall be
            created as a fake bridge). If specified, vlan must also be
            specified.
        vlan: Int, the VLAN ID of the bridge (if the bridge shall be created
            as a fake bridge). If specified, parent must also be specified.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_create br0
    '''
    # parent and vlan are only meaningful together (fake bridge creation).
    if parent is not None and vlan is None:
        raise ArgumentValueError(
            'If parent is specified, vlan must also be specified.')
    if vlan is not None and parent is None:
        raise ArgumentValueError(
            'If vlan is specified, parent must also be specified.')
    cmd = 'ovs-vsctl {0}add-br {1}'.format(_param_may_exist(may_exist), br)
    if parent is not None:
        cmd = '{0} {1} {2}'.format(cmd, parent, vlan)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def bridge_delete(br, if_exists=True):
    '''
    Deletes bridge and all of its ports.

    Args:
        br: A string - bridge name
        if_exists: Bool, if False - attempting to delete a bridge that does
            not exist returns False.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.bridge_delete br0
    '''
    cmd = 'ovs-vsctl {0}del-br {1}'.format(_param_if_exists(if_exists), br)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def bridge_to_parent(br):
'''
Returns the parent bridge of a bridge.
Args:
br: A string - bridge name
Returns:
Name of the parent bridge. This is the same as the bridge name if the
bridge is not a fake bridge. If the bridge does not exist, False is
returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-parent {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return result['stdout']
def bridge_to_vlan(br):
'''
Returns the VLAN ID of a bridge.
Args:
br: A string - bridge name
Returns:
VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake
bridge. If the bridge does not exist, False is returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-vlan {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return int(result['stdout'])
def port_add(br, port, may_exist=False, internal=False):
'''
Creates on bridge a new port named port.
Returns:
True on success, else False.
Args:
br: A string - bridge name
port: A string - port name
may_exist: Bool, if False - attempting to create a port that exists returns False.
internal: A boolean to create an internal interface if one does not exist.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_add br0 8080
'''
param_may_exist = _param_may_exist(may_exist)
cmd = 'ovs-vsctl {2}add-port {0} {1}'.format(br, port, param_may_exist)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def port_remove(br, port, if_exists=True):
'''
Deletes port.
Args:
br: A string - bridge name (If bridge is None, port is removed from whatever bridge contains it)
port: A string - port name.
if_exists: Bool, if False - attempting to delete a por that does not exist returns False. (Default True)
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_remove br0 8080
'''
param_if_exists = _param_if_exists(if_exists)
if port and not br:
cmd = 'ovs-vsctl {1}del-port {0}'.format(port, param_if_exists)
else:
cmd = 'ovs-vsctl {2}del-port {0} {1}'.format(br, port, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def port_list(br):
'''
Lists all of the ports within bridge.
Args:
br: A string - bridge name.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_list br0
'''
cmd = 'ovs-vsctl list-ports {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def port_get_tag(port):
'''
Lists tags of the port.
Args:
port: A string - port name.
Returns:
List of tags (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_get_tag tap0
'''
cmd = 'ovs-vsctl get port {0} tag'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def interface_get_options(port):
'''
Port's interface's optional parameters.
Args:
port: A string - port name.
Returns:
String containing optional parameters of port's interface, False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.interface_get_options tap0
'''
cmd = 'ovs-vsctl get interface {0} options'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def interface_get_type(port):
'''
Type of port's interface.
Args:
port: A string - port name.
Returns:
String - type of interface or empty string, False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.interface_get_type tap0
'''
cmd = 'ovs-vsctl get interface {0} type'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def port_create_vlan(br, port, id, internal=False):
'''
Isolate VM traffic using VLANs.
Args:
br: A string - bridge name.
port: A string - port name.
id: An integer in the valid range 0 to 4095 (inclusive), name of VLAN.
internal: A boolean to create an internal interface if one does not exist.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_create_vlan br0 tap0 100
'''
interfaces = __salt__['network.interfaces']()
if not 0 <= id <= 4095:
return False
elif not bridge_exists(br):
return False
elif not internal and port not in interfaces:
return False
elif port in port_list(br):
cmd = 'ovs-vsctl set port {0} tag={1}'.format(port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
else:
cmd = 'ovs-vsctl add-port {0} {1} tag={2}'.format(br, port, id)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def port_create_gre(br, port, id, remote):
    '''
    Generic Routing Encapsulation - creates GRE tunnel between endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 32-bit number, tunnel's key.
        remote: A string - remote endpoint's IP address.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:
    .. code-block:: bash

        salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
    '''
    if not 0 <= id < 2**32:
        return False
    elif not __salt__['dig.check_ip'](remote):
        return False
    elif not bridge_exists(br):
        return False
    # Existing ports are reconfigured in place; new ones are added to the
    # bridge and configured in a single ovs-vsctl invocation.
    if port in port_list(br):
        cmd = ('ovs-vsctl set interface {0} type=gre options:remote_ip={1} '
               'options:key={2}'.format(port, remote, id))
    else:
        cmd = ('ovs-vsctl add-port {0} {1} -- set interface {1} type=gre '
               'options:remote_ip={2} options:key={3}'.format(br, port,
                                                              remote, id))
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
def db_get(table, record, column, if_exists=False):
'''
Gets a column's value for a specific record.
Args:
table: A string - name of the database table.
record: A string - identifier of the record.
column: A string - name of the column.
if_exists: A boolean - if True, it is not an error if the record does
not exist.
Returns:
The column's value.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.db_get Port br0 vlan_mode
'''
cmd = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]
if if_exists:
cmd += ['--if-exists']
cmd += ['list', table, record]
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
raise CommandExecutionError(result['stderr'])
output = _stdout_parse_json(result['stdout'])
if output['data'] and output['data'][0]:
return output['data'][0][0]
else:
return None
def db_set(table, record, column, value, if_exists=False):
'''
Sets a column's value for a specific record.
Args:
table: A string - name of the database table.
record: A string - identifier of the record.
column: A string - name of the column.
value: A string - the value to be set
if_exists: A boolean - if True, it is not an error if the record does
not exist.
Returns:
None on success and an error message on failure.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
'''
cmd = ['ovs-vsctl']
if if_exists:
cmd += ['--if-exists']
cmd += ['set', table, record, '{0}={1}'.format(column, json.dumps(value))]
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return result['stderr']
else:
return None
|
saltstack/salt
|
salt/modules/openvswitch.py
|
db_get
|
python
|
def db_get(table, record, column, if_exists=False):
'''
Gets a column's value for a specific record.
Args:
table: A string - name of the database table.
record: A string - identifier of the record.
column: A string - name of the column.
if_exists: A boolean - if True, it is not an error if the record does
not exist.
Returns:
The column's value.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.db_get Port br0 vlan_mode
'''
cmd = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]
if if_exists:
cmd += ['--if-exists']
cmd += ['list', table, record]
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
raise CommandExecutionError(result['stderr'])
output = _stdout_parse_json(result['stdout'])
if output['data'] and output['data'][0]:
return output['data'][0][0]
else:
return None
|
Gets a column's value for a specific record.
Args:
table: A string - name of the database table.
record: A string - identifier of the record.
column: A string - name of the column.
if_exists: A boolean - if True, it is not an error if the record does
not exist.
Returns:
The column's value.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.db_get Port br0 vlan_mode
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L574-L604
|
[
"def _stdout_parse_json(stdout):\n '''\n Parses JSON output from ovs-vsctl and returns the corresponding object\n tree.\n\n Args:\n stdout: Output that shall be parsed.\n\n Returns:\n Object represented by the output.\n '''\n obj = json.loads(stdout)\n return _convert_json(obj)\n"
] |
# -*- coding: utf-8 -*-
'''
Support for Open vSwitch - module with basic Open vSwitch commands.
Suitable for setting up Openstack Neutron.
:codeauthor: Jiri Kotlin <jiri.kotlin@ultimum.io>
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
from salt.ext import six
from salt.exceptions import ArgumentValueError, CommandExecutionError
from salt.utils import json
import salt.utils.path
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load the module if Open vSwitch is installed
'''
if salt.utils.path.which('ovs-vsctl'):
return 'openvswitch'
return False
def _param_may_exist(may_exist):
'''
Returns --may-exist parameter for Open vSwitch command.
Args:
may_exist: Boolean whether to use this parameter.
Returns:
String '--may-exist ' or empty string.
'''
if may_exist:
return '--may-exist '
else:
return ''
def _param_if_exists(if_exists):
'''
Returns --if-exist parameter for Open vSwitch command.
Args:
if_exists: Boolean whether to use this parameter.
Returns:
String '--if-exist ' or empty string.
'''
if if_exists:
return '--if-exists '
else:
return ''
def _retcode_to_bool(retcode):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
Returns:
True on 0, else False
'''
if retcode == 0:
return True
else:
return False
def _stdout_list_split(retcode, stdout='', splitstring='\n'):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
stdout: Value of stdout filed from response.
splitstring: String used to split the stdout default new line.
Returns:
List or False.
'''
if retcode == 0:
ret = stdout.split(splitstring)
return ret
else:
return False
def _convert_json(obj):
'''
Converts from the JSON output provided by ovs-vsctl into a usable Python
object tree. In particular, sets and maps are converted from lists to
actual sets or maps.
Args:
obj: Object that shall be recursively converted.
Returns:
Converted version of object.
'''
if isinstance(obj, dict):
return {_convert_json(key): _convert_json(val)
for (key, val) in six.iteritems(obj)}
elif isinstance(obj, list) and len(obj) == 2:
first = obj[0]
second = obj[1]
if first == 'set' and isinstance(second, list):
return [_convert_json(elem) for elem in second]
elif first == 'map' and isinstance(second, list):
for elem in second:
if not isinstance(elem, list) or len(elem) != 2:
return obj
return {elem[0]: _convert_json(elem[1]) for elem in second}
else:
return obj
elif isinstance(obj, list):
return [_convert_json(elem) for elem in obj]
else:
return obj
def _stdout_parse_json(stdout):
'''
Parses JSON output from ovs-vsctl and returns the corresponding object
tree.
Args:
stdout: Output that shall be parsed.
Returns:
Object represented by the output.
'''
obj = json.loads(stdout)
return _convert_json(obj)
def bridge_list():
'''
Lists all existing real and fake bridges.
Returns:
List of bridges (or empty list), False on failure.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_list
'''
cmd = 'ovs-vsctl list-br'
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
stdout = result['stdout']
return _stdout_list_split(retcode, stdout)
def bridge_exists(br):
'''
Tests whether bridge exists as a real or fake bridge.
Returns:
True if Bridge exists, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_exists br0
'''
cmd = 'ovs-vsctl br-exists {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_create(br, may_exist=True, parent=None, vlan=None):
'''
Creates a new bridge.
Args:
br: A string - bridge name
may_exist: Bool, if False - attempting to create a bridge that exists returns False.
parent: String, the name of the parent bridge (if the bridge shall be
created as a fake bridge). If specified, vlan must also be
specified.
vlan: Int, the VLAN ID of the bridge (if the bridge shall be created as
a fake bridge). If specified, parent must also be specified.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_create br0
'''
param_may_exist = _param_may_exist(may_exist)
if parent is not None and vlan is None:
raise ArgumentValueError(
'If parent is specified, vlan must also be specified.')
if vlan is not None and parent is None:
raise ArgumentValueError(
'If vlan is specified, parent must also be specified.')
param_parent = '' if parent is None else ' {0}'.format(parent)
param_vlan = '' if vlan is None else ' {0}'.format(vlan)
cmd = 'ovs-vsctl {1}add-br {0}{2}{3}'.format(br, param_may_exist, param_parent,
param_vlan)
result = __salt__['cmd.run_all'](cmd)
return _retcode_to_bool(result['retcode'])
def bridge_delete(br, if_exists=True):
'''
Deletes bridge and all of its ports.
Args:
br: A string - bridge name
if_exists: Bool, if False - attempting to delete a bridge that does not exist returns False.
Returns:
True on success, else False.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_delete br0
'''
param_if_exists = _param_if_exists(if_exists)
cmd = 'ovs-vsctl {1}del-br {0}'.format(br, param_if_exists)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def bridge_to_parent(br):
'''
Returns the parent bridge of a bridge.
Args:
br: A string - bridge name
Returns:
Name of the parent bridge. This is the same as the bridge name if the
bridge is not a fake bridge. If the bridge does not exist, False is
returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-parent {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return result['stdout']
def bridge_to_vlan(br):
'''
Returns the VLAN ID of a bridge.
Args:
br: A string - bridge name
Returns:
VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake
bridge. If the bridge does not exist, False is returned.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.bridge_to_parent br0
'''
cmd = 'ovs-vsctl br-to-vlan {0}'.format(br)
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return False
return int(result['stdout'])
def port_add(br, port, may_exist=False, internal=False):
'''
Creates on bridge a new port named port.
Returns:
True on success, else False.
Args:
br: A string - bridge name
port: A string - port name
may_exist: Bool, if False - attempting to create a port that exists returns False.
internal: A boolean to create an internal interface if one does not exist.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' openvswitch.port_add br0 8080
'''
param_may_exist = _param_may_exist(may_exist)
cmd = 'ovs-vsctl {2}add-port {0} {1}'.format(br, port, param_may_exist)
if internal:
cmd += ' -- set interface {0} type=internal'.format(port)
result = __salt__['cmd.run_all'](cmd)
retcode = result['retcode']
return _retcode_to_bool(retcode)
def port_remove(br, port, if_exists=True):
    '''
    Deletes port.

    Args:
        br: A string - bridge name (If bridge is None, port is removed from
            whatever bridge contains it)
        port: A string - port name.
        if_exists: Bool, if False - attempting to delete a port that does not
            exist returns False. (Default True)

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_remove br0 8080
    '''
    flag = _param_if_exists(if_exists)
    # Without a bridge name ovs-vsctl looks up the containing bridge itself.
    if port and not br:
        command = 'ovs-vsctl {1}del-port {0}'.format(port, flag)
    else:
        command = 'ovs-vsctl {2}del-port {0} {1}'.format(br, port, flag)
    outcome = __salt__['cmd.run_all'](command)
    return _retcode_to_bool(outcome['retcode'])
def port_list(br):
    '''
    Lists all of the ports within bridge.

    Args:
        br: A string - bridge name.

    Returns:
        List of ports (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_list br0
    '''
    outcome = __salt__['cmd.run_all']('ovs-vsctl list-ports {0}'.format(br))
    return _stdout_list_split(outcome['retcode'], outcome['stdout'])
def port_get_tag(port):
    '''
    Lists tags of the port.

    Args:
        port: A string - port name.

    Returns:
        List of tags (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_get_tag tap0
    '''
    outcome = __salt__['cmd.run_all']('ovs-vsctl get port {0} tag'.format(port))
    return _stdout_list_split(outcome['retcode'], outcome['stdout'])
def interface_get_options(port):
    '''
    Port's interface's optional parameters.

    Args:
        port: A string - port name.

    Returns:
        String containing optional parameters of port's interface, False on
        failure.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.interface_get_options tap0
    '''
    outcome = __salt__['cmd.run_all']('ovs-vsctl get interface {0} options'.format(port))
    return _stdout_list_split(outcome['retcode'], outcome['stdout'])
def interface_get_type(port):
    '''
    Type of port's interface.

    Args:
        port: A string - port name.

    Returns:
        String - type of interface or empty string, False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.interface_get_type tap0
    '''
    outcome = __salt__['cmd.run_all']('ovs-vsctl get interface {0} type'.format(port))
    return _stdout_list_split(outcome['retcode'], outcome['stdout'])
def port_create_vlan(br, port, id, internal=False):
    '''
    Isolate VM traffic using VLANs.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer in the valid range 0 to 4095 (inclusive), name of VLAN.
        internal: A boolean to create an internal interface if one does not
            exist.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_create_vlan br0 tap0 100
    '''
    interfaces = __salt__['network.interfaces']()
    # Validate the VLAN id range, the bridge, and (for non-internal ports)
    # that the system interface exists, before touching the database.
    if not 0 <= id <= 4095:
        return False
    if not bridge_exists(br):
        return False
    if not internal and port not in interfaces:
        return False
    if port in port_list(br):
        # Port already attached: only (re)tag it.
        cmd = 'ovs-vsctl set port {0} tag={1}'.format(port, id)
    else:
        cmd = 'ovs-vsctl add-port {0} {1} tag={2}'.format(br, port, id)
    if internal:
        cmd += ' -- set interface {0} type=internal'.format(port)
    outcome = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(outcome['retcode'])
def port_create_gre(br, port, id, remote):
    '''
    Generic Routing Encapsulation - creates GRE tunnel between endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 32-bit number, tunnel's key.
        remote: A string - remote endpoint's IP address.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
    '''
    # Key must fit in an unsigned 32-bit integer and the remote address
    # must be a valid IP before anything is created.
    if not 0 <= id < 2**32:
        return False
    if not __salt__['dig.check_ip'](remote):
        return False
    if not bridge_exists(br):
        return False
    if port in port_list(br):
        # Port already exists: just (re)configure the tunnel interface.
        cmd = 'ovs-vsctl set interface {0} type=gre options:remote_ip={1} options:key={2}'.format(port, remote, id)
    else:
        cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=gre options:remote_ip={2} ' \
              'options:key={3}'.format(br, port, remote, id)
    outcome = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(outcome['retcode'])
def port_create_vxlan(br, port, id, remote, dst_port=None):
    '''
    Virtual eXtensible Local Area Network - creates VXLAN tunnel between
    endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 64-bit number, tunnel's key.
        remote: A string - remote endpoint's IP address.
        dst_port: An integer - UDP port to use for the tunnel endpoint, or
            None to use the Open vSwitch default.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_create_vxlan br0 vx1 5001 192.168.1.10 8472
    '''
    # Bug fix: ``0 < dst_port`` raised TypeError on Python 3 when dst_port
    # was left at its default of None; only build the option string when a
    # valid UDP port number was actually supplied.
    if dst_port is not None and 0 < dst_port <= 65535:
        dst_port = ' options:dst_port=' + six.text_type(dst_port)
    else:
        dst_port = ''
    if not 0 <= id < 2**64:
        return False
    elif not __salt__['dig.check_ip'](remote):
        return False
    elif not bridge_exists(br):
        return False
    elif port in port_list(br):
        # Port already exists: just (re)configure the tunnel interface.
        cmd = 'ovs-vsctl set interface {0} type=vxlan options:remote_ip={1} ' \
              'options:key={2}{3}'.format(port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
    else:
        cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=vxlan options:remote_ip={2} ' \
              'options:key={3}{4}'.format(br, port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
def db_set(table, record, column, value, if_exists=False):
    '''
    Sets a column's value for a specific record.

    Args:
        table: A string - name of the database table.
        record: A string - identifier of the record.
        column: A string - name of the column.
        value: A string - the value to be set
        if_exists: A boolean - if True, it is not an error if the record does
            not exist.

    Returns:
        None on success and an error message on failure.

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
    '''
    args = ['ovs-vsctl']
    if if_exists:
        args.append('--if-exists')
    # The value is JSON-encoded so ovs-vsctl receives properly quoted data.
    args.extend(['set', table, record, '{0}={1}'.format(column, json.dumps(value))])
    outcome = __salt__['cmd.run_all'](args)
    if outcome['retcode'] != 0:
        return outcome['stderr']
    return None
|
saltstack/salt
|
salt/modules/openvswitch.py
|
db_set
|
python
|
def db_set(table, record, column, value, if_exists=False):
'''
Sets a column's value for a specific record.
Args:
table: A string - name of the database table.
record: A string - identifier of the record.
column: A string - name of the column.
value: A string - the value to be set
if_exists: A boolean - if True, it is not an error if the record does
not exist.
Returns:
None on success and an error message on failure.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
'''
cmd = ['ovs-vsctl']
if if_exists:
cmd += ['--if-exists']
cmd += ['set', table, record, '{0}={1}'.format(column, json.dumps(value))]
result = __salt__['cmd.run_all'](cmd)
if result['retcode'] != 0:
return result['stderr']
else:
return None
|
Sets a column's value for a specific record.
Args:
table: A string - name of the database table.
record: A string - identifier of the record.
column: A string - name of the column.
value: A string - the value to be set
if_exists: A boolean - if True, it is not an error if the record does
not exist.
Returns:
None on success and an error message on failure.
CLI Example:
.. code-block:: bash
salt '*' openvswitch.db_set Interface br0 mac 02:03:04:05:06:07
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L607-L635
|
[
"def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n"
] |
# -*- coding: utf-8 -*-
'''
Support for Open vSwitch - module with basic Open vSwitch commands.
Suitable for setting up Openstack Neutron.
:codeauthor: Jiri Kotlin <jiri.kotlin@ultimum.io>
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
# Import salt libs
from salt.ext import six
from salt.exceptions import ArgumentValueError, CommandExecutionError
from salt.utils import json
import salt.utils.path
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only load the module if Open vSwitch is installed
    '''
    # The module is usable only when the ovs-vsctl binary is on PATH.
    if salt.utils.path.which('ovs-vsctl') is None:
        return False
    return 'openvswitch'
def _param_may_exist(may_exist):
'''
Returns --may-exist parameter for Open vSwitch command.
Args:
may_exist: Boolean whether to use this parameter.
Returns:
String '--may-exist ' or empty string.
'''
if may_exist:
return '--may-exist '
else:
return ''
def _param_if_exists(if_exists):
'''
Returns --if-exist parameter for Open vSwitch command.
Args:
if_exists: Boolean whether to use this parameter.
Returns:
String '--if-exist ' or empty string.
'''
if if_exists:
return '--if-exists '
else:
return ''
def _retcode_to_bool(retcode):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
Returns:
True on 0, else False
'''
if retcode == 0:
return True
else:
return False
def _stdout_list_split(retcode, stdout='', splitstring='\n'):
'''
Evaulates Open vSwitch command`s retcode value.
Args:
retcode: Value of retcode field from response, should be 0, 1 or 2.
stdout: Value of stdout filed from response.
splitstring: String used to split the stdout default new line.
Returns:
List or False.
'''
if retcode == 0:
ret = stdout.split(splitstring)
return ret
else:
return False
def _convert_json(obj):
'''
Converts from the JSON output provided by ovs-vsctl into a usable Python
object tree. In particular, sets and maps are converted from lists to
actual sets or maps.
Args:
obj: Object that shall be recursively converted.
Returns:
Converted version of object.
'''
if isinstance(obj, dict):
return {_convert_json(key): _convert_json(val)
for (key, val) in six.iteritems(obj)}
elif isinstance(obj, list) and len(obj) == 2:
first = obj[0]
second = obj[1]
if first == 'set' and isinstance(second, list):
return [_convert_json(elem) for elem in second]
elif first == 'map' and isinstance(second, list):
for elem in second:
if not isinstance(elem, list) or len(elem) != 2:
return obj
return {elem[0]: _convert_json(elem[1]) for elem in second}
else:
return obj
elif isinstance(obj, list):
return [_convert_json(elem) for elem in obj]
else:
return obj
def _stdout_parse_json(stdout):
    '''
    Parses JSON output from ovs-vsctl and returns the corresponding object
    tree.

    Args:
        stdout: Output that shall be parsed.

    Returns:
        Object represented by the output.
    '''
    # Decode the raw JSON, then normalize OVSDB set/map encodings.
    return _convert_json(json.loads(stdout))
def bridge_list():
    '''
    Lists all existing real and fake bridges.

    Returns:
        List of bridges (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.bridge_list
    '''
    outcome = __salt__['cmd.run_all']('ovs-vsctl list-br')
    return _stdout_list_split(outcome['retcode'], outcome['stdout'])
def bridge_exists(br):
    '''
    Tests whether bridge exists as a real or fake bridge.

    Returns:
        True if Bridge exists, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.bridge_exists br0
    '''
    # br-exists exits 0 when the bridge exists, non-zero otherwise.
    outcome = __salt__['cmd.run_all']('ovs-vsctl br-exists {0}'.format(br))
    return _retcode_to_bool(outcome['retcode'])
def bridge_create(br, may_exist=True, parent=None, vlan=None):
    '''
    Creates a new bridge.

    Args:
        br: A string - bridge name
        may_exist: Bool, if False - attempting to create a bridge that exists
            returns False.
        parent: String, the name of the parent bridge (if the bridge shall be
            created as a fake bridge). If specified, vlan must also be
            specified.
        vlan: Int, the VLAN ID of the bridge (if the bridge shall be created
            as a fake bridge). If specified, parent must also be specified.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.bridge_create br0
    '''
    param_may_exist = _param_may_exist(may_exist)
    # Fake-bridge creation requires both parent and vlan together.
    if parent is not None and vlan is None:
        raise ArgumentValueError(
            'If parent is specified, vlan must also be specified.')
    if vlan is not None and parent is None:
        raise ArgumentValueError(
            'If vlan is specified, parent must also be specified.')
    param_parent = '' if parent is None else ' {0}'.format(parent)
    param_vlan = '' if vlan is None else ' {0}'.format(vlan)
    command = 'ovs-vsctl {1}add-br {0}{2}{3}'.format(br, param_may_exist, param_parent,
                                                     param_vlan)
    outcome = __salt__['cmd.run_all'](command)
    return _retcode_to_bool(outcome['retcode'])
def bridge_delete(br, if_exists=True):
    '''
    Deletes bridge and all of its ports.

    Args:
        br: A string - bridge name
        if_exists: Bool, if False - attempting to delete a bridge that does
            not exist returns False.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.bridge_delete br0
    '''
    flag = _param_if_exists(if_exists)
    outcome = __salt__['cmd.run_all']('ovs-vsctl {1}del-br {0}'.format(br, flag))
    return _retcode_to_bool(outcome['retcode'])
def bridge_to_parent(br):
    '''
    Returns the parent bridge of a bridge.

    Args:
        br: A string - bridge name

    Returns:
        Name of the parent bridge. This is the same as the bridge name if the
        bridge is not a fake bridge. If the bridge does not exist, False is
        returned.

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.bridge_to_parent br0
    '''
    cmd = 'ovs-vsctl br-to-parent {0}'.format(br)
    result = __salt__['cmd.run_all'](cmd)
    # A non-zero exit code means the bridge does not exist.
    if result['retcode'] != 0:
        return False
    return result['stdout']
def bridge_to_vlan(br):
    '''
    Returns the VLAN ID of a bridge.

    Args:
        br: A string - bridge name

    Returns:
        VLAN ID of the bridge. The VLAN ID is 0 if the bridge is not a fake
        bridge. If the bridge does not exist, False is returned.

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.bridge_to_vlan br0
    '''
    cmd = 'ovs-vsctl br-to-vlan {0}'.format(br)
    result = __salt__['cmd.run_all'](cmd)
    # A non-zero exit code means the bridge does not exist.
    if result['retcode'] != 0:
        return False
    return int(result['stdout'])
def port_add(br, port, may_exist=False, internal=False):
    '''
    Creates on bridge a new port named port.

    Args:
        br: A string - bridge name
        port: A string - port name
        may_exist: Bool, if False - attempting to create a port that exists
            returns False.
        internal: A boolean to create an internal interface if one does not
            exist.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_add br0 8080
    '''
    param_may_exist = _param_may_exist(may_exist)
    cmd = 'ovs-vsctl {2}add-port {0} {1}'.format(br, port, param_may_exist)
    if internal:
        # Ask ovs to create the interface itself instead of attaching an
        # existing system interface.
        cmd += ' -- set interface {0} type=internal'.format(port)
    result = __salt__['cmd.run_all'](cmd)
    retcode = result['retcode']
    return _retcode_to_bool(retcode)
def port_remove(br, port, if_exists=True):
    '''
    Deletes port.

    Args:
        br: A string - bridge name (If bridge is None, port is removed from
            whatever bridge contains it)
        port: A string - port name.
        if_exists: Bool, if False - attempting to delete a port that does not
            exist returns False. (Default True)

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_remove br0 8080
    '''
    param_if_exists = _param_if_exists(if_exists)
    # Without a bridge name, ovs-vsctl looks up the containing bridge itself.
    if port and not br:
        cmd = 'ovs-vsctl {1}del-port {0}'.format(port, param_if_exists)
    else:
        cmd = 'ovs-vsctl {2}del-port {0} {1}'.format(br, port, param_if_exists)
    result = __salt__['cmd.run_all'](cmd)
    retcode = result['retcode']
    return _retcode_to_bool(retcode)
def port_list(br):
    '''
    Lists all of the ports within bridge.

    Args:
        br: A string - bridge name.

    Returns:
        List of ports (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_list br0
    '''
    cmd = 'ovs-vsctl list-ports {0}'.format(br)
    result = __salt__['cmd.run_all'](cmd)
    retcode = result['retcode']
    stdout = result['stdout']
    return _stdout_list_split(retcode, stdout)
def port_get_tag(port):
    '''
    Lists tags of the port.

    Args:
        port: A string - port name.

    Returns:
        List of tags (or empty list), False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_get_tag tap0
    '''
    cmd = 'ovs-vsctl get port {0} tag'.format(port)
    result = __salt__['cmd.run_all'](cmd)
    retcode = result['retcode']
    stdout = result['stdout']
    return _stdout_list_split(retcode, stdout)
def interface_get_options(port):
    '''
    Port's interface's optional parameters.

    Args:
        port: A string - port name.

    Returns:
        String containing optional parameters of port's interface, False on
        failure.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.interface_get_options tap0
    '''
    cmd = 'ovs-vsctl get interface {0} options'.format(port)
    result = __salt__['cmd.run_all'](cmd)
    retcode = result['retcode']
    stdout = result['stdout']
    return _stdout_list_split(retcode, stdout)
def interface_get_type(port):
    '''
    Type of port's interface.

    Args:
        port: A string - port name.

    Returns:
        String - type of interface or empty string, False on failure.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.interface_get_type tap0
    '''
    cmd = 'ovs-vsctl get interface {0} type'.format(port)
    result = __salt__['cmd.run_all'](cmd)
    retcode = result['retcode']
    stdout = result['stdout']
    return _stdout_list_split(retcode, stdout)
def port_create_vlan(br, port, id, internal=False):
    '''
    Isolate VM traffic using VLANs.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer in the valid range 0 to 4095 (inclusive), name of VLAN.
        internal: A boolean to create an internal interface if one does not
            exist.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_create_vlan br0 tap0 100
    '''
    interfaces = __salt__['network.interfaces']()
    # Validate the VLAN id range, the bridge, and (for non-internal ports)
    # that the system interface exists, before touching the database.
    if not 0 <= id <= 4095:
        return False
    elif not bridge_exists(br):
        return False
    elif not internal and port not in interfaces:
        return False
    elif port in port_list(br):
        # Port is already attached to the bridge: only (re)tag it.
        cmd = 'ovs-vsctl set port {0} tag={1}'.format(port, id)
        if internal:
            cmd += ' -- set interface {0} type=internal'.format(port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
    else:
        cmd = 'ovs-vsctl add-port {0} {1} tag={2}'.format(br, port, id)
        if internal:
            cmd += ' -- set interface {0} type=internal'.format(port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
def port_create_gre(br, port, id, remote):
    '''
    Generic Routing Encapsulation - creates GRE tunnel between endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 32-bit number, tunnel's key.
        remote: A string - remote endpoint's IP address.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_create_gre br0 gre1 5001 192.168.1.10
    '''
    # Key must fit in an unsigned 32-bit integer and the remote address
    # must be a valid IP before anything is created.
    if not 0 <= id < 2**32:
        return False
    elif not __salt__['dig.check_ip'](remote):
        return False
    elif not bridge_exists(br):
        return False
    elif port in port_list(br):
        # Port already exists: just (re)configure the tunnel interface.
        cmd = 'ovs-vsctl set interface {0} type=gre options:remote_ip={1} options:key={2}'.format(port, remote, id)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
    else:
        cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=gre options:remote_ip={2} ' \
              'options:key={3}'.format(br, port, remote, id)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
def port_create_vxlan(br, port, id, remote, dst_port=None):
    '''
    Virtual eXtensible Local Area Network - creates VXLAN tunnel between
    endpoints.

    Args:
        br: A string - bridge name.
        port: A string - port name.
        id: An integer - unsigned 64-bit number, tunnel's key.
        remote: A string - remote endpoint's IP address.
        dst_port: An integer - UDP port to use for the tunnel endpoint, or
            None to use the Open vSwitch default.

    Returns:
        True on success, else False.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.port_create_vxlan br0 vx1 5001 192.168.1.10 8472
    '''
    # Bug fix: ``0 < dst_port`` raised TypeError on Python 3 when dst_port
    # was left at its default of None; only build the option string when a
    # valid UDP port number was actually supplied.
    if dst_port is not None and 0 < dst_port <= 65535:
        dst_port = ' options:dst_port=' + six.text_type(dst_port)
    else:
        dst_port = ''
    if not 0 <= id < 2**64:
        return False
    elif not __salt__['dig.check_ip'](remote):
        return False
    elif not bridge_exists(br):
        return False
    elif port in port_list(br):
        # Port already exists: just (re)configure the tunnel interface.
        cmd = 'ovs-vsctl set interface {0} type=vxlan options:remote_ip={1} ' \
              'options:key={2}{3}'.format(port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
    else:
        cmd = 'ovs-vsctl add-port {0} {1} -- set interface {1} type=vxlan options:remote_ip={2} ' \
              'options:key={3}{4}'.format(br, port, remote, id, dst_port)
        result = __salt__['cmd.run_all'](cmd)
        return _retcode_to_bool(result['retcode'])
def db_get(table, record, column, if_exists=False):
    '''
    Gets a column's value for a specific record.

    Args:
        table: A string - name of the database table.
        record: A string - identifier of the record.
        column: A string - name of the column.
        if_exists: A boolean - if True, it is not an error if the record does
            not exist.

    Returns:
        The column's value.

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.db_get Port br0 vlan_mode
    '''
    args = ['ovs-vsctl', '--format=json', '--columns={0}'.format(column)]
    if if_exists:
        args.append('--if-exists')
    args.extend(['list', table, record])
    result = __salt__['cmd.run_all'](args)
    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])
    parsed = _stdout_parse_json(result['stdout'])
    # 'data' is a list of rows; each row is a list of column values.
    if parsed['data'] and parsed['data'][0]:
        return parsed['data'][0][0]
    return None
|
saltstack/salt
|
salt/states/azurearm_network.py
|
virtual_network_present
|
python
|
def virtual_network_present(name, address_prefixes, resource_group, dns_servers=None,
                            tags=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a virtual network exists.

    :param name:
        Name of the virtual network.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param address_prefixes:
        A list of CIDR blocks which can be used by subnets within the virtual network.

    :param dns_servers:
        A list of DNS server addresses.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the virtual network object.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure virtual network exists:
            azurearm_network.virtual_network_present:
                - name: vnet1
                - resource_group: group1
                - address_prefixes:
                    - '10.0.0.0/8'
                    - '192.168.0.0/16'
                - dns_servers:
                    - '8.8.8.8'
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Look up the existing vnet; the execution module returns a dict with an
    # 'error' key when the vnet does not exist or the lookup failed.
    vnet = __salt__['azurearm_network.virtual_network_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in vnet:
        # Vnet exists: diff each managed property against the desired state.
        tag_changes = __utils__['dictdiffer.deep_diff'](vnet.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        # Symmetric difference: detects both added and removed DNS servers.
        dns_changes = set(dns_servers or []).symmetric_difference(
            set(vnet.get('dhcp_options', {}).get('dns_servers', [])))
        if dns_changes:
            ret['changes']['dns_servers'] = {
                'old': vnet.get('dhcp_options', {}).get('dns_servers', []),
                'new': dns_servers,
            }

        addr_changes = set(address_prefixes or []).symmetric_difference(
            set(vnet.get('address_space', {}).get('address_prefixes', [])))
        if addr_changes:
            ret['changes']['address_space'] = {
                'address_prefixes': {
                    'old': vnet.get('address_space', {}).get('address_prefixes', []),
                    'new': address_prefixes,
                }
            }

        if kwargs.get('enable_ddos_protection', False) != vnet.get('enable_ddos_protection'):
            ret['changes']['enable_ddos_protection'] = {
                'old': vnet.get('enable_ddos_protection'),
                'new': kwargs.get('enable_ddos_protection')
            }

        if kwargs.get('enable_vm_protection', False) != vnet.get('enable_vm_protection'):
            ret['changes']['enable_vm_protection'] = {
                'old': vnet.get('enable_vm_protection'),
                'new': kwargs.get('enable_vm_protection')
            }

        if not ret['changes']:
            # Nothing to do.
            ret['result'] = True
            ret['comment'] = 'Virtual network {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Virtual network {0} would be updated.'.format(name)
            return ret

    else:
        # Vnet does not exist yet: everything desired counts as a change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'resource_group': resource_group,
                'address_space': {'address_prefixes': address_prefixes},
                'dhcp_options': {'dns_servers': dns_servers},
                'enable_ddos_protection': kwargs.get('enable_ddos_protection', False),
                'enable_vm_protection': kwargs.get('enable_vm_protection', False),
                'tags': tags,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Virtual network {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # create_or_update handles both the create and the update path.
    vnet_kwargs = kwargs.copy()
    vnet_kwargs.update(connection_auth)

    vnet = __salt__['azurearm_network.virtual_network_create_or_update'](
        name=name,
        resource_group=resource_group,
        address_prefixes=address_prefixes,
        dns_servers=dns_servers,
        tags=tags,
        **vnet_kwargs
    )

    if 'error' not in vnet:
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create virtual network {0}! ({1})'.format(name, vnet.get('error'))
    return ret
|
.. versionadded:: 2019.2.0
Ensure a virtual network exists.
:param name:
Name of the virtual network.
:param resource_group:
The resource group assigned to the virtual network.
:param address_prefixes:
A list of CIDR blocks which can be used by subnets within the virtual network.
:param dns_servers:
A list of DNS server addresses.
:param tags:
A dictionary of strings can be passed as tag metadata to the virtual network object.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure virtual network exists:
azurearm_network.virtual_network_present:
- name: vnet1
- resource_group: group1
- address_prefixes:
- '10.0.0.0/8'
- '192.168.0.0/16'
- dns_servers:
- '8.8.8.8'
- tags:
contact_name: Elmer Fudd Gantry
- connection_auth: {{ profile }}
- require:
- azurearm_resource: Ensure resource group exists
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/azurearm_network.py#L112-L258
| null |
# -*- coding: utf-8 -*-
'''
Azure (ARM) Network State Module
.. versionadded:: 2019.2.0
:maintainer: <devops@decisionlab.io>
:maturity: new
:depends:
* `azure <https://pypi.python.org/pypi/azure>`_ >= 2.0.0
* `azure-common <https://pypi.python.org/pypi/azure-common>`_ >= 1.1.8
* `azure-mgmt <https://pypi.python.org/pypi/azure-mgmt>`_ >= 1.0.0
* `azure-mgmt-compute <https://pypi.python.org/pypi/azure-mgmt-compute>`_ >= 1.0.0
* `azure-mgmt-network <https://pypi.python.org/pypi/azure-mgmt-network>`_ >= 1.7.1
* `azure-mgmt-resource <https://pypi.python.org/pypi/azure-mgmt-resource>`_ >= 1.1.0
* `azure-mgmt-storage <https://pypi.python.org/pypi/azure-mgmt-storage>`_ >= 1.0.0
* `azure-mgmt-web <https://pypi.python.org/pypi/azure-mgmt-web>`_ >= 0.32.0
* `azure-storage <https://pypi.python.org/pypi/azure-storage>`_ >= 0.34.3
* `msrestazure <https://pypi.python.org/pypi/msrestazure>`_ >= 0.4.21
:platform: linux
:configuration: This module requires Azure Resource Manager credentials to be passed as a dictionary of
keyword arguments to the ``connection_auth`` parameter in order to work properly. Since the authentication
parameters are sensitive, it's recommended to pass them to the states via pillar.
Required provider parameters:
if using username and password:
* ``subscription_id``
* ``username``
* ``password``
if using a service principal:
* ``subscription_id``
* ``tenant``
* ``client_id``
* ``secret``
Optional provider parameters:
**cloud_environment**: Used to point the cloud driver to different API endpoints, such as Azure GovCloud. Possible values:
* ``AZURE_PUBLIC_CLOUD`` (default)
* ``AZURE_CHINA_CLOUD``
* ``AZURE_US_GOV_CLOUD``
* ``AZURE_GERMAN_CLOUD``
Example Pillar for Azure Resource Manager authentication:
.. code-block:: yaml
azurearm:
user_pass_auth:
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
username: fletch
password: 123pass
mysubscription:
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
tenant: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
client_id: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
secret: XXXXXXXXXXXXXXXXXXXXXXXX
cloud_environment: AZURE_PUBLIC_CLOUD
Example states using Azure Resource Manager authentication:
.. code-block:: jinja
{% set profile = salt['pillar.get']('azurearm:mysubscription') %}
Ensure virtual network exists:
azurearm_network.virtual_network_present:
- name: my_vnet
- resource_group: my_rg
- address_prefixes:
- '10.0.0.0/8'
- '192.168.0.0/16'
- dns_servers:
- '8.8.8.8'
- tags:
how_awesome: very
contact_name: Elmer Fudd Gantry
- connection_auth: {{ profile }}
Ensure virtual network is absent:
azurearm_network.virtual_network_absent:
- name: other_vnet
- resource_group: my_rg
- connection_auth: {{ profile }}
'''
# Python libs
from __future__ import absolute_import
import logging
# Salt libs
try:
from salt.ext.six.moves import range as six_range
except ImportError:
six_range = range
__virtualname__ = 'azurearm_network'
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only make this state available if the azurearm_network module is available.
    '''
    # Probe for one representative execution-module function.
    if 'azurearm_network.check_ip_address_availability' in __salt__:
        return __virtualname__
    return False
def virtual_network_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a virtual network does not exist in the resource group.

    :param name:
        Name of the virtual network.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # The execution module returns a dict containing 'error' when the vnet
    # is not found; that means there is nothing to delete.
    vnet = __salt__['azurearm_network.virtual_network_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' in vnet:
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Virtual network {0} would be deleted.'.format(name)
        ret['changes'] = {'old': vnet, 'new': {}}
        return ret

    deleted = __salt__['azurearm_network.virtual_network_delete'](name, resource_group, **connection_auth)

    if deleted:
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} has been deleted.'.format(name)
        ret['changes'] = {'old': vnet, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete virtual network {0}!'.format(name)
    return ret
def subnet_present(name, address_prefix, virtual_network, resource_group,
                   security_group=None, route_table=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a subnet exists with the given address prefix and optional
    security group / route table associations.

    :param name:
        Name of the subnet.

    :param address_prefix:
        A CIDR block used by the subnet within the virtual network.

    :param virtual_network:
        Name of the existing virtual network to contain the subnet.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param security_group:
        The name of the existing network security group to assign to the subnet.

    :param route_table:
        The name of the existing route table to assign to the subnet.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in
        connecting to the Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure subnet exists:
            azurearm_network.subnet_present:
                - name: vnet1_sn1
                - virtual_network: vnet1
                - resource_group: group1
                - address_prefix: '192.168.1.0/24'
                - security_group: nsg1
                - route_table: rt1
                - connection_auth: {{ profile }}
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.subnet_get'](
        name,
        virtual_network,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in existing:
        # Subnet is deployed; diff the managed fields against the request.
        if address_prefix != existing.get('address_prefix'):
            ret['changes']['address_prefix'] = {
                'old': existing.get('address_prefix'),
                'new': address_prefix
            }

        # The API only returns resource IDs; compare by trailing name segment.
        current_nsg = None
        if existing.get('network_security_group'):
            current_nsg = existing['network_security_group']['id'].split('/')[-1]
        if security_group and (security_group != current_nsg):
            ret['changes']['network_security_group'] = {
                'old': current_nsg,
                'new': security_group
            }

        current_rttbl = None
        if existing.get('route_table'):
            current_rttbl = existing['route_table']['id'].split('/')[-1]
        if route_table and (route_table != current_rttbl):
            ret['changes']['route_table'] = {
                'old': current_rttbl,
                'new': route_table
            }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Subnet {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Subnet {0} would be updated.'.format(name)
            return ret
    else:
        # Subnet does not exist yet; everything requested is a change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'address_prefix': address_prefix,
                'network_security_group': security_group,
                'route_table': route_table
            }
        }

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Subnet {0} would be created.'.format(name)
            return ret

    create_kwargs = dict(kwargs, **connection_auth)
    result = __salt__['azurearm_network.subnet_create_or_update'](
        name=name,
        virtual_network=virtual_network,
        resource_group=resource_group,
        address_prefix=address_prefix,
        network_security_group=security_group,
        route_table=route_table,
        **create_kwargs
    )

    if 'error' not in result:
        ret['result'] = True
        ret['comment'] = 'Subnet {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create subnet {0}! ({1})'.format(name, result.get('error'))
    return ret
def subnet_absent(name, virtual_network, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure the named subnet does not exist in the virtual network.

    :param name:
        Name of the subnet.

    :param virtual_network:
        Name of the existing virtual network containing the subnet.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in
        connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.subnet_get'](
        name,
        virtual_network,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the subnet is already gone; nothing to do.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Subnet {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Subnet {0} would be deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.subnet_delete'](name, virtual_network, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Subnet {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete subnet {0}!'.format(name)
    return ret
def network_security_group_present(name, resource_group, tags=None, security_rules=None, connection_auth=None,
                                   **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network security group exists.

    :param name:
        Name of the network security group.

    :param resource_group:
        The resource group assigned to the network security group.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the network
        security group object.

    :param security_rules:
        An optional list of dictionaries representing valid SecurityRule
        objects (see the security_rule_present state for valid parameters).
        Rules are only managed when this parameter is present; otherwise any
        deployed rules are left untouched and become unmanaged.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in
        connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.network_security_group_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in existing:
        # Group is deployed; diff tags and (optionally) security rules.
        tag_diff = __utils__['dictdiffer.deep_diff'](existing.get('tags', {}), tags or {})
        if tag_diff:
            ret['changes']['tags'] = tag_diff

        if security_rules:
            comp = __utils__['azurearm.compare_list_of_dicts'](existing.get('security_rules', []), security_rules)
            if comp.get('comment'):
                ret['comment'] = '"security_rules" {0}'.format(comp['comment'])
                return ret
            if comp.get('changes'):
                ret['changes']['security_rules'] = comp['changes']

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Network security group {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Network security group {0} would be updated.'.format(name)
            return ret
    else:
        # Group does not exist yet; everything requested is a change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'resource_group': resource_group,
                'tags': tags,
                'security_rules': security_rules,
            }
        }

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Network security group {0} would be created.'.format(name)
            return ret

    create_kwargs = dict(kwargs, **connection_auth)
    result = __salt__['azurearm_network.network_security_group_create_or_update'](
        name=name,
        resource_group=resource_group,
        tags=tags,
        security_rules=security_rules,
        **create_kwargs
    )

    if 'error' not in result:
        ret['result'] = True
        ret['comment'] = 'Network security group {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create network security group {0}! ({1})'.format(name, result.get('error'))
    return ret
def network_security_group_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure the named network security group does not exist in the resource group.

    :param name:
        Name of the network security group.

    :param resource_group:
        The resource group assigned to the network security group.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in
        connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.network_security_group_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the group is already gone; nothing to do.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Network security group {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Network security group {0} would be deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.network_security_group_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Network security group {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete network security group {0}!'.format(name)
    return ret
def security_rule_present(name, access, direction, priority, protocol, security_group, resource_group,
                          destination_address_prefix=None, destination_port_range=None, source_address_prefix=None,
                          source_port_range=None, description=None, destination_address_prefixes=None,
                          destination_port_ranges=None, source_address_prefixes=None, source_port_ranges=None,
                          connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a security rule exists.

    :param name:
        Name of the security rule.

    :param access:
        'allow' or 'deny'

    :param direction:
        'inbound' or 'outbound'

    :param priority:
        Integer between 100 and 4096 used for ordering rule application.

    :param protocol:
        'tcp', 'udp', or '*'

    :param security_group:
        The name of the existing network security group to contain the security rule.

    :param resource_group:
        The resource group assigned to the network security group.

    :param description:
        Optional description of the security rule.

    :param destination_address_prefix:
        The CIDR or destination IP range. Asterix '*' can also be used to match all destination IPs.
        Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.

    :param destination_port_range:
        The destination port or range. Integer or range between 0 and 65535. Asterix '*'
        can also be used to match all ports.

    :param source_address_prefix:
        The CIDR or source IP range. Asterix '*' can also be used to match all source IPs.
        Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.

    :param source_port_range:
        The source port or range. Integer or range between 0 and 65535. Asterix '*'
        can also be used to match all ports.

    :param destination_address_prefixes:
        A list of destination_address_prefix values. This parameter overrides destination_address_prefix
        and will cause any value entered there to be ignored.

    :param destination_port_ranges:
        A list of destination_port_range values. This parameter overrides destination_port_range
        and will cause any value entered there to be ignored.

    :param source_address_prefixes:
        A list of source_address_prefix values. This parameter overrides source_address_prefix
        and will cause any value entered there to be ignored.

    :param source_port_ranges:
        A list of source_port_range values. This parameter overrides source_port_range
        and will cause any value entered there to be ignored.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Validate the mutually exclusive plural/singular parameter pairs.
    # NOTE: the previous implementation used eval()/exec() on parameter names;
    # in Python 3 exec() cannot rebind function locals, so the singular value
    # was never actually cleared when its plural override was supplied and was
    # passed through to the API. Tuples of actual values avoid that entirely.
    exclusive_params = [
        ('source_port_ranges', source_port_ranges, 'source_port_range', source_port_range),
        ('source_address_prefixes', source_address_prefixes, 'source_address_prefix', source_address_prefix),
        ('destination_port_ranges', destination_port_ranges, 'destination_port_range', destination_port_range),
        ('destination_address_prefixes', destination_address_prefixes,
         'destination_address_prefix', destination_address_prefix),
    ]

    for plural_name, plural_val, singular_name, singular_val in exclusive_params:
        if not plural_val and not singular_val:
            ret['comment'] = 'Either the {0} or {1} parameter must be provided!'.format(plural_name, singular_name)
            return ret
        if plural_val and not isinstance(plural_val, list):
            ret['comment'] = 'The {0} parameter must be a list!'.format(plural_name)
            return ret

    # The plural parameters override their singular counterparts.
    if source_port_ranges:
        source_port_range = None
    if source_address_prefixes:
        source_address_prefix = None
    if destination_port_ranges:
        destination_port_range = None
    if destination_address_prefixes:
        destination_address_prefix = None

    rule = __salt__['azurearm_network.security_rule_get'](
        name,
        security_group,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in rule:
        # access changes ('access' is stored capitalized by the API)
        if access.capitalize() != rule.get('access'):
            ret['changes']['access'] = {
                'old': rule.get('access'),
                'new': access
            }

        # description changes
        if description != rule.get('description'):
            ret['changes']['description'] = {
                'old': rule.get('description'),
                'new': description
            }

        # direction changes ('direction' is stored capitalized by the API)
        if direction.capitalize() != rule.get('direction'):
            ret['changes']['direction'] = {
                'old': rule.get('direction'),
                'new': direction
            }

        # priority changes
        if int(priority) != rule.get('priority'):
            ret['changes']['priority'] = {
                'old': rule.get('priority'),
                'new': priority
            }

        # protocol changes (case-insensitive; guard against a None value)
        if protocol.lower() != (rule.get('protocol') or '').lower():
            ret['changes']['protocol'] = {
                'old': rule.get('protocol'),
                'new': protocol
            }

        # destination_port_range changes
        if destination_port_range != rule.get('destination_port_range'):
            ret['changes']['destination_port_range'] = {
                'old': rule.get('destination_port_range'),
                'new': destination_port_range
            }

        # source_port_range changes
        if source_port_range != rule.get('source_port_range'):
            ret['changes']['source_port_range'] = {
                'old': rule.get('source_port_range'),
                'new': source_port_range
            }

        # destination_port_ranges changes (order-insensitive)
        if sorted(destination_port_ranges or []) != sorted(rule.get('destination_port_ranges', [])):
            ret['changes']['destination_port_ranges'] = {
                'old': rule.get('destination_port_ranges'),
                'new': destination_port_ranges
            }

        # source_port_ranges changes (order-insensitive)
        if sorted(source_port_ranges or []) != sorted(rule.get('source_port_ranges', [])):
            ret['changes']['source_port_ranges'] = {
                'old': rule.get('source_port_ranges'),
                'new': source_port_ranges
            }

        # destination_address_prefix changes (case-insensitive; guard None)
        if (destination_address_prefix or '').lower() != (rule.get('destination_address_prefix') or '').lower():
            ret['changes']['destination_address_prefix'] = {
                'old': rule.get('destination_address_prefix'),
                'new': destination_address_prefix
            }

        # source_address_prefix changes (case-insensitive; guard None)
        if (source_address_prefix or '').lower() != (rule.get('source_address_prefix') or '').lower():
            ret['changes']['source_address_prefix'] = {
                'old': rule.get('source_address_prefix'),
                'new': source_address_prefix
            }

        # destination_address_prefixes changes (order- and case-insensitive)
        if sorted(destination_address_prefixes or []) != sorted(rule.get('destination_address_prefixes', [])):
            if len(destination_address_prefixes or []) != len(rule.get('destination_address_prefixes', [])):
                ret['changes']['destination_address_prefixes'] = {
                    'old': rule.get('destination_address_prefixes'),
                    'new': destination_address_prefixes
                }
            else:
                # Same length; re-compare element-wise ignoring case.
                for local_pfx, remote_pfx in zip(sorted(destination_address_prefixes),
                                                 sorted(rule.get('destination_address_prefixes'))):
                    if local_pfx.lower() != remote_pfx.lower():
                        ret['changes']['destination_address_prefixes'] = {
                            'old': rule.get('destination_address_prefixes'),
                            'new': destination_address_prefixes
                        }
                        break

        # source_address_prefixes changes (order- and case-insensitive)
        if sorted(source_address_prefixes or []) != sorted(rule.get('source_address_prefixes', [])):
            if len(source_address_prefixes or []) != len(rule.get('source_address_prefixes', [])):
                ret['changes']['source_address_prefixes'] = {
                    'old': rule.get('source_address_prefixes'),
                    'new': source_address_prefixes
                }
            else:
                # Same length; re-compare element-wise ignoring case.
                for local_pfx, remote_pfx in zip(sorted(source_address_prefixes),
                                                 sorted(rule.get('source_address_prefixes'))):
                    if local_pfx.lower() != remote_pfx.lower():
                        ret['changes']['source_address_prefixes'] = {
                            'old': rule.get('source_address_prefixes'),
                            'new': source_address_prefixes
                        }
                        break

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Security rule {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Security rule {0} would be updated.'.format(name)
            return ret
    else:
        # Rule does not exist yet; everything requested is a change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'access': access,
                'description': description,
                'direction': direction,
                'priority': priority,
                'protocol': protocol,
                'destination_address_prefix': destination_address_prefix,
                'destination_address_prefixes': destination_address_prefixes,
                'destination_port_range': destination_port_range,
                'destination_port_ranges': destination_port_ranges,
                'source_address_prefix': source_address_prefix,
                'source_address_prefixes': source_address_prefixes,
                'source_port_range': source_port_range,
                'source_port_ranges': source_port_ranges,
            }
        }

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Security rule {0} would be created.'.format(name)
            return ret

    rule_kwargs = kwargs.copy()
    rule_kwargs.update(connection_auth)

    rule = __salt__['azurearm_network.security_rule_create_or_update'](
        name=name,
        access=access,
        description=description,
        direction=direction,
        priority=priority,
        protocol=protocol,
        security_group=security_group,
        resource_group=resource_group,
        destination_address_prefix=destination_address_prefix,
        destination_address_prefixes=destination_address_prefixes,
        destination_port_range=destination_port_range,
        destination_port_ranges=destination_port_ranges,
        source_address_prefix=source_address_prefix,
        source_address_prefixes=source_address_prefixes,
        source_port_range=source_port_range,
        source_port_ranges=source_port_ranges,
        **rule_kwargs
    )

    if 'error' not in rule:
        ret['result'] = True
        ret['comment'] = 'Security rule {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create security rule {0}! ({1})'.format(name, rule.get('error'))
    return ret
def security_rule_absent(name, security_group, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure the named security rule does not exist in the network security group.

    :param name:
        Name of the security rule.

    :param security_group:
        The network security group containing the security rule.

    :param resource_group:
        The resource group assigned to the network security group.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in
        connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.security_rule_get'](
        name,
        security_group,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the rule is already gone; nothing to do.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Security rule {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Security rule {0} would be deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.security_rule_delete'](name, security_group, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Security rule {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete security rule {0}!'.format(name)
    return ret
def load_balancer_present(name, resource_group, sku=None, frontend_ip_configurations=None, backend_address_pools=None,
                          load_balancing_rules=None, probes=None, inbound_nat_rules=None, inbound_nat_pools=None,
                          outbound_nat_rules=None, tags=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a load balancer exists.

    :param name:
        Name of the load balancer.

    :param resource_group:
        The resource group assigned to the load balancer.

    :param sku:
        The load balancer SKU, which can be 'Basic' or 'Standard'.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the load
        balancer object.

    :param frontend_ip_configurations:
        An optional list of dictionaries representing valid
        FrontendIPConfiguration objects. Each requires a ``name`` and is
        either private (``private_ip_address``/``private_ip_allocation_method``
        plus ``subnet``) or public (a ``public_ip_address`` name reference).

    :param backend_address_pools:
        An optional list of dictionaries representing valid BackendAddressPool
        objects. Only ``name`` is settable; other fields are read-only
        references from linking objects.

    :param probes:
        An optional list of dictionaries representing valid Probe objects
        (``name``, ``protocol``, ``port``, ``interval_in_seconds``,
        ``number_of_probes``, ``request_path``).

    :param load_balancing_rules:
        An optional list of dictionaries representing valid LoadBalancingRule
        objects, referencing frontend IP configurations, backend pools, and
        probes by name.

    :param inbound_nat_rules:
        An optional list of dictionaries representing valid InboundNatRule
        objects. Mutually exclusive with ``inbound_nat_pools``.

    :param inbound_nat_pools:
        An optional list of dictionaries representing valid InboundNatPool
        objects. Mutually exclusive with ``inbound_nat_rules``.

    :param outbound_nat_rules:
        An optional list of dictionaries representing valid OutboundNatRule
        objects.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in
        connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # The API models the SKU as an object with a capitalized name.
    if sku:
        sku = {'name': sku.capitalize()}

    existing = __salt__['azurearm_network.load_balancer_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in existing:
        # tag changes
        tag_diff = __utils__['dictdiffer.deep_diff'](existing.get('tags', {}), tags or {})
        if tag_diff:
            ret['changes']['tags'] = tag_diff

        # sku changes
        if sku:
            sku_diff = __utils__['dictdiffer.deep_diff'](existing.get('sku', {}), sku)
            if sku_diff:
                ret['changes']['sku'] = sku_diff

        # Each list-valued parameter is compared against the deployed
        # configuration the same way; the third element names the keys that
        # hold references to other objects (compared by name).
        list_params = (
            ('frontend_ip_configurations', frontend_ip_configurations, ['public_ip_address', 'subnet']),
            ('backend_address_pools', backend_address_pools, None),
            ('probes', probes, None),
            ('load_balancing_rules', load_balancing_rules,
             ['frontend_ip_configuration', 'backend_address_pool', 'probe']),
            ('inbound_nat_rules', inbound_nat_rules, ['frontend_ip_configuration']),
            ('inbound_nat_pools', inbound_nat_pools, ['frontend_ip_configuration']),
            ('outbound_nat_rules', outbound_nat_rules, ['frontend_ip_configuration']),
        )

        for param, new_value, ref_keys in list_params:
            if not new_value:
                continue
            if ref_keys:
                comp = __utils__['azurearm.compare_list_of_dicts'](existing.get(param, []), new_value, ref_keys)
            else:
                comp = __utils__['azurearm.compare_list_of_dicts'](existing.get(param, []), new_value)
            if comp.get('comment'):
                ret['comment'] = '"{0}" {1}'.format(param, comp['comment'])
                return ret
            if comp.get('changes'):
                ret['changes'][param] = comp['changes']

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Load balancer {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Load balancer {0} would be updated.'.format(name)
            return ret
    else:
        # Load balancer does not exist yet; everything requested is a change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'sku': sku,
                'tags': tags,
                'frontend_ip_configurations': frontend_ip_configurations,
                'backend_address_pools': backend_address_pools,
                'load_balancing_rules': load_balancing_rules,
                'probes': probes,
                'inbound_nat_rules': inbound_nat_rules,
                'inbound_nat_pools': inbound_nat_pools,
                'outbound_nat_rules': outbound_nat_rules,
            }
        }

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Load balancer {0} would be created.'.format(name)
            return ret

    create_kwargs = dict(kwargs, **connection_auth)
    result = __salt__['azurearm_network.load_balancer_create_or_update'](
        name=name,
        resource_group=resource_group,
        sku=sku,
        tags=tags,
        frontend_ip_configurations=frontend_ip_configurations,
        backend_address_pools=backend_address_pools,
        load_balancing_rules=load_balancing_rules,
        probes=probes,
        inbound_nat_rules=inbound_nat_rules,
        inbound_nat_pools=inbound_nat_pools,
        outbound_nat_rules=outbound_nat_rules,
        **create_kwargs
    )

    if 'error' not in result:
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create load balancer {0}! ({1})'.format(name, result.get('error'))
    return ret
def load_balancer_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a load balancer does not exist in the resource group.

    :param name:
        Name of the load balancer.

    :param resource_group:
        The resource group assigned to the load balancer.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.load_balancer_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the load balancer is already gone.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'Load balancer {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.load_balancer_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete load balancer {0}!'.format(name)
    return ret
def public_ip_address_present(name, resource_group, tags=None, sku=None, public_ip_allocation_method=None,
                              public_ip_address_version=None, dns_settings=None, idle_timeout_in_minutes=None,
                              connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a public IP address exists.

    :param name:
        Name of the public IP address.

    :param resource_group:
        The resource group assigned to the public IP address.

    :param dns_settings:
        An optional dictionary representing a valid PublicIPAddressDnsSettings object. Parameters include
        'domain_name_label' and 'reverse_fqdn', which accept strings. The 'domain_name_label' parameter is concatenated
        with the regionalized DNS zone to make up the fully qualified domain name associated with the public IP address.
        If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS
        system. The 'reverse_fqdn' parameter is a user-visible, fully qualified domain name that resolves to this public
        IP address. If the reverse FQDN is specified, then a PTR DNS record is created pointing from the IP address in
        the in-addr.arpa domain to the reverse FQDN.

    :param sku:
        The public IP address SKU, which can be 'Basic' or 'Standard'.

    :param public_ip_allocation_method:
        The public IP allocation method. Possible values are: 'Static' and 'Dynamic'.

    :param public_ip_address_version:
        The public IP address version. Possible values are: 'IPv4' and 'IPv6'.

    :param idle_timeout_in_minutes:
        An integer representing the idle timeout of the public IP address.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the public IP address object.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure public IP exists:
            azurearm_network.public_ip_address_present:
                - name: pub_ip1
                - resource_group: group1
                - dns_settings:
                    domain_name_label: decisionlab-ext-test-label
                - sku: basic
                - public_ip_allocation_method: static
                - public_ip_address_version: ipv4
                - idle_timeout_in_minutes: 4
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    # Standard Salt state return structure.
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # The execution module expects the SKU as a dict with a capitalized name
    # (e.g. 'basic' -> {'name': 'Basic'}).
    if sku:
        sku = {'name': sku.capitalize()}

    pub_ip = __salt__['azurearm_network.public_ip_address_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # No 'error' key means the address already exists: diff each managed
    # property against the desired values and accumulate ret['changes'].
    if 'error' not in pub_ip:
        # tag changes
        tag_changes = __utils__['dictdiffer.deep_diff'](pub_ip.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        # dns_settings changes
        if dns_settings:
            if not isinstance(dns_settings, dict):
                ret['comment'] = 'DNS settings must be provided as a dictionary!'
                return ret

            # Only the keys the user supplied are compared; a single differing
            # key flags the whole dns_settings object as changed.
            for key in dns_settings:
                if dns_settings[key] != pub_ip.get('dns_settings', {}).get(key):
                    ret['changes']['dns_settings'] = {
                        'old': pub_ip.get('dns_settings'),
                        'new': dns_settings
                    }
                    break

        # sku changes
        if sku:
            sku_changes = __utils__['dictdiffer.deep_diff'](pub_ip.get('sku', {}), sku)
            if sku_changes:
                ret['changes']['sku'] = sku_changes

        # public_ip_allocation_method changes
        if public_ip_allocation_method:
            # .capitalize() matches the casing Azure reports ('Static'/'Dynamic').
            if public_ip_allocation_method.capitalize() != pub_ip.get('public_ip_allocation_method'):
                ret['changes']['public_ip_allocation_method'] = {
                    'old': pub_ip.get('public_ip_allocation_method'),
                    'new': public_ip_allocation_method
                }

        # public_ip_address_version changes
        if public_ip_address_version:
            # Case-insensitive compare so 'ipv4' matches Azure's 'IPv4'.
            if public_ip_address_version.lower() != pub_ip.get('public_ip_address_version', '').lower():
                ret['changes']['public_ip_address_version'] = {
                    'old': pub_ip.get('public_ip_address_version'),
                    'new': public_ip_address_version
                }

        # idle_timeout_in_minutes changes
        if idle_timeout_in_minutes and (int(idle_timeout_in_minutes) != pub_ip.get('idle_timeout_in_minutes')):
            ret['changes']['idle_timeout_in_minutes'] = {
                'old': pub_ip.get('idle_timeout_in_minutes'),
                'new': idle_timeout_in_minutes
            }

        # Nothing differs: the state is already satisfied.
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Public IP address {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Public IP address {0} would be updated.'.format(name)
            return ret

    else:
        # The address does not exist yet; everything requested is a new change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'tags': tags,
                'dns_settings': dns_settings,
                'sku': sku,
                'public_ip_allocation_method': public_ip_allocation_method,
                'public_ip_address_version': public_ip_address_version,
                'idle_timeout_in_minutes': idle_timeout_in_minutes,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Public IP address {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Pass any extra state kwargs plus the credentials through to the module.
    pub_ip_kwargs = kwargs.copy()
    pub_ip_kwargs.update(connection_auth)

    pub_ip = __salt__['azurearm_network.public_ip_address_create_or_update'](
        name=name,
        resource_group=resource_group,
        sku=sku,
        tags=tags,
        dns_settings=dns_settings,
        public_ip_allocation_method=public_ip_allocation_method,
        public_ip_address_version=public_ip_address_version,
        idle_timeout_in_minutes=idle_timeout_in_minutes,
        **pub_ip_kwargs
    )

    if 'error' not in pub_ip:
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create public IP address {0}! ({1})'.format(name, pub_ip.get('error'))
    return ret
def public_ip_address_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a public IP address does not exist in the resource group.

    :param name:
        Name of the public IP address.

    :param resource_group:
        The resource group assigned to the public IP address.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.public_ip_address_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the address is already absent.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'Public IP address {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.public_ip_address_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete public IP address {0}!'.format(name)
    return ret
def network_interface_present(name, ip_configurations, subnet, virtual_network, resource_group, tags=None,
                              virtual_machine=None, network_security_group=None, dns_settings=None, mac_address=None,
                              primary=None, enable_accelerated_networking=None, enable_ip_forwarding=None,
                              connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network interface exists.

    :param name:
        Name of the network interface.

    :param ip_configurations:
        A list of dictionaries representing valid NetworkInterfaceIPConfiguration objects. The 'name' key is required at
        minimum. At least one IP Configuration must be present.

    :param subnet:
        Name of the existing subnet assigned to the network interface.

    :param virtual_network:
        Name of the existing virtual network containing the subnet.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the network interface object.

    :param network_security_group:
        The name of the existing network security group to assign to the network interface.

    :param virtual_machine:
        The name of the existing virtual machine to assign to the network interface.

    :param dns_settings:
        An optional dictionary representing a valid NetworkInterfaceDnsSettings object. Valid parameters are:

        - ``dns_servers``: List of DNS server IP addresses. Use 'AzureProvidedDNS' to switch to Azure provided DNS
          resolution. 'AzureProvidedDNS' value cannot be combined with other IPs, it must be the only value in
          dns_servers collection.
        - ``internal_dns_name_label``: Relative DNS name for this NIC used for internal communications between VMs in
          the same virtual network.
        - ``internal_fqdn``: Fully qualified DNS name supporting internal communications between VMs in the same virtual
          network.
        - ``internal_domain_name_suffix``: Even if internal_dns_name_label is not specified, a DNS entry is created for
          the primary NIC of the VM. This DNS name can be constructed by concatenating the VM name with the value of
          internal_domain_name_suffix.

    :param mac_address:
        Optional string containing the MAC address of the network interface.

    :param primary:
        Optional boolean allowing the interface to be set as the primary network interface on a virtual machine
        with multiple interfaces attached.

    :param enable_accelerated_networking:
        Optional boolean indicating whether accelerated networking should be enabled for the interface.

    :param enable_ip_forwarding:
        Optional boolean indicating whether IP forwarding should be enabled for the interface.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure network interface exists:
            azurearm_network.network_interface_present:
                - name: iface1
                - subnet: vnet1_sn1
                - virtual_network: vnet1
                - resource_group: group1
                - ip_configurations:
                  - name: iface1_ipc1
                    public_ip_address: pub_ip2
                - dns_settings:
                    internal_dns_name_label: decisionlab-int-test-label
                - primary: True
                - enable_accelerated_networking: True
                - enable_ip_forwarding: False
                - network_security_group: nsg1
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure subnet exists
                  - azurearm_network: Ensure network security group exists
                  - azurearm_network: Ensure another public IP exists
    '''
    # Standard Salt state return structure.
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    iface = __salt__['azurearm_network.network_interface_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # No 'error' key means the interface already exists: diff each managed
    # property against the desired values and accumulate ret['changes'].
    if 'error' not in iface:
        # tag changes
        tag_changes = __utils__['dictdiffer.deep_diff'](iface.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        # mac_address changes
        if mac_address and (mac_address != iface.get('mac_address')):
            ret['changes']['mac_address'] = {
                'old': iface.get('mac_address'),
                'new': mac_address
            }

        # primary changes
        # 'is not None' so an explicit False is still honored as a managed value.
        if primary is not None:
            # Missing 'primary' defaults to True here, so a request for
            # primary=True on an interface without the attribute is a no-op.
            if primary != iface.get('primary', True):
                ret['changes']['primary'] = {
                    'old': iface.get('primary'),
                    'new': primary
                }

        # enable_accelerated_networking changes
        if enable_accelerated_networking is not None:
            if enable_accelerated_networking != iface.get('enable_accelerated_networking'):
                ret['changes']['enable_accelerated_networking'] = {
                    'old': iface.get('enable_accelerated_networking'),
                    'new': enable_accelerated_networking
                }

        # enable_ip_forwarding changes
        if enable_ip_forwarding is not None:
            if enable_ip_forwarding != iface.get('enable_ip_forwarding'):
                ret['changes']['enable_ip_forwarding'] = {
                    'old': iface.get('enable_ip_forwarding'),
                    'new': enable_ip_forwarding
                }

        # network_security_group changes
        # Azure returns the NSG as a full resource ID; the last path segment
        # is the name used for comparison.
        nsg_name = None
        if iface.get('network_security_group'):
            nsg_name = iface['network_security_group']['id'].split('/')[-1]

        if network_security_group and (network_security_group != nsg_name):
            ret['changes']['network_security_group'] = {
                'old': nsg_name,
                'new': network_security_group
            }

        # virtual_machine changes
        # Same resource-ID-to-name extraction as for the NSG above.
        vm_name = None
        if iface.get('virtual_machine'):
            vm_name = iface['virtual_machine']['id'].split('/')[-1]

        if virtual_machine and (virtual_machine != vm_name):
            ret['changes']['virtual_machine'] = {
                'old': vm_name,
                'new': virtual_machine
            }

        # dns_settings changes
        if dns_settings:
            if not isinstance(dns_settings, dict):
                ret['comment'] = 'DNS settings must be provided as a dictionary!'
                return ret

            # Only the keys the user supplied are compared; a single differing
            # key flags the whole dns_settings object as changed.
            # NOTE(review): this assumes every dns_settings value is a string.
            # 'dns_servers' is documented above as a list, on which .lower()
            # would raise AttributeError -- confirm expected input shapes.
            for key in dns_settings:
                if dns_settings[key].lower() != iface.get('dns_settings', {}).get(key, '').lower():
                    ret['changes']['dns_settings'] = {
                        'old': iface.get('dns_settings'),
                        'new': dns_settings
                    }
                    break

        # ip_configurations changes
        # 'public_ip_address' and 'subnet' are compared by the last segment of
        # their resource IDs inside the helper.
        comp_ret = __utils__['azurearm.compare_list_of_dicts'](
            iface.get('ip_configurations', []),
            ip_configurations,
            ['public_ip_address', 'subnet']
        )

        if comp_ret.get('comment'):
            ret['comment'] = '"ip_configurations" {0}'.format(comp_ret['comment'])
            return ret

        if comp_ret.get('changes'):
            ret['changes']['ip_configurations'] = comp_ret['changes']

        # Nothing differs: the state is already satisfied.
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Network interface {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Network interface {0} would be updated.'.format(name)
            return ret

    else:
        # The interface does not exist yet; everything requested is a new change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'ip_configurations': ip_configurations,
                'dns_settings': dns_settings,
                'network_security_group': network_security_group,
                'virtual_machine': virtual_machine,
                'enable_accelerated_networking': enable_accelerated_networking,
                'enable_ip_forwarding': enable_ip_forwarding,
                'mac_address': mac_address,
                'primary': primary,
                'tags': tags,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Network interface {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Pass any extra state kwargs plus the credentials through to the module.
    iface_kwargs = kwargs.copy()
    iface_kwargs.update(connection_auth)

    iface = __salt__['azurearm_network.network_interface_create_or_update'](
        name=name,
        subnet=subnet,
        virtual_network=virtual_network,
        resource_group=resource_group,
        ip_configurations=ip_configurations,
        dns_settings=dns_settings,
        enable_accelerated_networking=enable_accelerated_networking,
        enable_ip_forwarding=enable_ip_forwarding,
        mac_address=mac_address,
        primary=primary,
        network_security_group=network_security_group,
        virtual_machine=virtual_machine,
        tags=tags,
        **iface_kwargs
    )

    if 'error' not in iface:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create network interface {0}! ({1})'.format(name, iface.get('error'))
    return ret
def network_interface_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network interface does not exist in the resource group.

    :param name:
        Name of the network interface.

    :param resource_group:
        The resource group assigned to the network interface.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    # Standard Salt state return structure.
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    iface = __salt__['azurearm_network.network_interface_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the interface is already absent -- the desired state.
    if 'error' in iface:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} was not found.'.format(name)
        return ret
    elif __opts__['test']:
        ret['comment'] = 'Network interface {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {
            'old': iface,
            'new': {},
        }
        return ret

    deleted = __salt__['azurearm_network.network_interface_delete'](name, resource_group, **connection_auth)

    if deleted:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} has been deleted.'.format(name)
        ret['changes'] = {
            'old': iface,
            'new': {}
        }
        return ret

    # Bug fix: the failure message previously ended with a stray ')'
    # ('...{0}!)') that never matched the other *_absent states.
    ret['comment'] = 'Failed to delete network interface {0}!'.format(name)
    return ret
def route_table_present(name, resource_group, tags=None, routes=None, disable_bgp_route_propagation=None,
                        connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route table exists.

    :param name:
        Name of the route table.

    :param resource_group:
        The resource group assigned to the route table.

    :param routes:
        An optional list of dictionaries representing valid Route objects contained within a route table. See the
        documentation for the route_present state or route_create_or_update execution module for more information on
        required and optional parameters for routes. The routes are only managed if this parameter is present. When this
        parameter is absent, implemented routes will not be removed, and will merely become unmanaged.

    :param disable_bgp_route_propagation:
        An optional boolean parameter setting whether to disable the routes learned by BGP on the route table.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the route table object.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure route table exists:
            azurearm_network.route_table_present:
                - name: rt1
                - resource_group: group1
                - routes:
                  - name: rt1_route1
                    address_prefix: '0.0.0.0/0'
                    next_hop_type: internet
                  - name: rt1_route2
                    address_prefix: '192.168.0.0/16'
                    next_hop_type: vnetlocal
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    # Standard Salt state return structure.
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    rt_tbl = __salt__['azurearm_network.route_table_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # No 'error' key means the route table already exists: diff each managed
    # property against the desired values and accumulate ret['changes'].
    if 'error' not in rt_tbl:
        # tag changes
        tag_changes = __utils__['dictdiffer.deep_diff'](rt_tbl.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        # disable_bgp_route_propagation changes
        # Bug fix: use an explicit None check so that an explicit False
        # (re-enabling BGP route propagation) is detected as a change instead
        # of being treated as "not specified". This matches the boolean
        # handling convention used in network_interface_present.
        # pylint: disable=line-too-long
        if disable_bgp_route_propagation is not None and (disable_bgp_route_propagation != rt_tbl.get('disable_bgp_route_propagation')):
            ret['changes']['disable_bgp_route_propagation'] = {
                'old': rt_tbl.get('disable_bgp_route_propagation'),
                'new': disable_bgp_route_propagation
            }

        # routes changes (only managed when 'routes' is supplied)
        if routes:
            comp_ret = __utils__['azurearm.compare_list_of_dicts'](rt_tbl.get('routes', []), routes)

            if comp_ret.get('comment'):
                ret['comment'] = '"routes" {0}'.format(comp_ret['comment'])
                return ret

            if comp_ret.get('changes'):
                ret['changes']['routes'] = comp_ret['changes']

        # Nothing differs: the state is already satisfied.
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Route table {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Route table {0} would be updated.'.format(name)
            return ret

    else:
        # The route table does not exist yet; everything requested is a new change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'tags': tags,
                'routes': routes,
                'disable_bgp_route_propagation': disable_bgp_route_propagation,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Route table {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Pass any extra state kwargs plus the credentials through to the module.
    rt_tbl_kwargs = kwargs.copy()
    rt_tbl_kwargs.update(connection_auth)

    rt_tbl = __salt__['azurearm_network.route_table_create_or_update'](
        name=name,
        resource_group=resource_group,
        disable_bgp_route_propagation=disable_bgp_route_propagation,
        routes=routes,
        tags=tags,
        **rt_tbl_kwargs
    )

    if 'error' not in rt_tbl:
        ret['result'] = True
        ret['comment'] = 'Route table {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create route table {0}! ({1})'.format(name, rt_tbl.get('error'))
    return ret
def route_table_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route table does not exist in the resource group.

    :param name:
        Name of the route table.

    :param resource_group:
        The resource group assigned to the route table.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.route_table_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the route table is already absent.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Route table {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'Route table {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.route_table_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Route table {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete route table {0}!'.format(name)
    return ret
def route_present(name, address_prefix, next_hop_type, route_table, resource_group, next_hop_ip_address=None,
                  connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route exists within a route table.

    :param name:
        Name of the route.

    :param address_prefix:
        The destination CIDR to which the route applies.

    :param next_hop_type:
        The type of Azure hop the packet should be sent to. Possible values are: 'VirtualNetworkGateway', 'VnetLocal',
        'Internet', 'VirtualAppliance', and 'None'.

    :param next_hop_ip_address:
        The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop
        type is 'VirtualAppliance'.

    :param route_table:
        The name of the existing route table which will contain the route.

    :param resource_group:
        The resource group assigned to the route table.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure route exists:
            azurearm_network.route_present:
                - name: rt1_route2
                - route_table: rt1
                - resource_group: group1
                - address_prefix: '192.168.0.0/16'
                - next_hop_type: vnetlocal
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure route table exists
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.route_get'](
        name,
        route_table,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in existing:
        # Route already exists: compare each managed property.
        if address_prefix != existing.get('address_prefix'):
            ret['changes']['address_prefix'] = {'old': existing.get('address_prefix'), 'new': address_prefix}

        # Case-insensitive compare so e.g. 'vnetlocal' matches 'VnetLocal'.
        if next_hop_type.lower() != existing.get('next_hop_type', '').lower():
            ret['changes']['next_hop_type'] = {'old': existing.get('next_hop_type'), 'new': next_hop_type}

        # Next hop IP is only meaningful for the VirtualAppliance hop type.
        if next_hop_type.lower() == 'virtualappliance' and next_hop_ip_address != existing.get('next_hop_ip_address'):
            ret['changes']['next_hop_ip_address'] = {
                'old': existing.get('next_hop_ip_address'),
                'new': next_hop_ip_address
            }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Route {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Route {0} would be updated.'.format(name)
            return ret
    else:
        # Route does not exist yet; all requested properties are new.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'address_prefix': address_prefix,
                'next_hop_type': next_hop_type,
                'next_hop_ip_address': next_hop_ip_address
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Route {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Merge extra state kwargs with the credentials for the module call.
    route_kwargs = dict(kwargs, **connection_auth)

    result = __salt__['azurearm_network.route_create_or_update'](
        name=name,
        route_table=route_table,
        resource_group=resource_group,
        address_prefix=address_prefix,
        next_hop_type=next_hop_type,
        next_hop_ip_address=next_hop_ip_address,
        **route_kwargs
    )

    if 'error' not in result:
        ret['result'] = True
        ret['comment'] = 'Route {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create route {0}! ({1})'.format(name, result.get('error'))
    return ret
def route_absent(name, route_table, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route does not exist within a route table.

    :param name:
        Name of the route.

    :param route_table:
        The name of the existing route table containing the route.

    :param resource_group:
        The resource group assigned to the route table.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.route_get'](
        name,
        route_table,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the route is already absent.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Route {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'Route {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.route_delete'](name, route_table, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Route {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete route {0}!'.format(name)
    return ret
|
saltstack/salt
|
salt/states/azurearm_network.py
|
virtual_network_absent
|
python
|
def virtual_network_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a virtual network does not exist in the resource group.

    :param name:
        Name of the virtual network.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.virtual_network_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the virtual network is already absent.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'Virtual network {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.virtual_network_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete virtual network {0}!'.format(name)
    return ret
|
.. versionadded:: 2019.2.0
Ensure a virtual network does not exist in the resource group.
:param name:
Name of the virtual network.
:param resource_group:
The resource group assigned to the virtual network.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/azurearm_network.py#L261-L321
| null |
# -*- coding: utf-8 -*-
'''
Azure (ARM) Network State Module
.. versionadded:: 2019.2.0
:maintainer: <devops@decisionlab.io>
:maturity: new
:depends:
* `azure <https://pypi.python.org/pypi/azure>`_ >= 2.0.0
* `azure-common <https://pypi.python.org/pypi/azure-common>`_ >= 1.1.8
* `azure-mgmt <https://pypi.python.org/pypi/azure-mgmt>`_ >= 1.0.0
* `azure-mgmt-compute <https://pypi.python.org/pypi/azure-mgmt-compute>`_ >= 1.0.0
* `azure-mgmt-network <https://pypi.python.org/pypi/azure-mgmt-network>`_ >= 1.7.1
* `azure-mgmt-resource <https://pypi.python.org/pypi/azure-mgmt-resource>`_ >= 1.1.0
* `azure-mgmt-storage <https://pypi.python.org/pypi/azure-mgmt-storage>`_ >= 1.0.0
* `azure-mgmt-web <https://pypi.python.org/pypi/azure-mgmt-web>`_ >= 0.32.0
* `azure-storage <https://pypi.python.org/pypi/azure-storage>`_ >= 0.34.3
* `msrestazure <https://pypi.python.org/pypi/msrestazure>`_ >= 0.4.21
:platform: linux
:configuration: This module requires Azure Resource Manager credentials to be passed as a dictionary of
keyword arguments to the ``connection_auth`` parameter in order to work properly. Since the authentication
parameters are sensitive, it's recommended to pass them to the states via pillar.
Required provider parameters:
if using username and password:
* ``subscription_id``
* ``username``
* ``password``
if using a service principal:
* ``subscription_id``
* ``tenant``
* ``client_id``
* ``secret``
Optional provider parameters:
**cloud_environment**: Used to point the cloud driver to different API endpoints, such as Azure GovCloud. Possible values:
* ``AZURE_PUBLIC_CLOUD`` (default)
* ``AZURE_CHINA_CLOUD``
* ``AZURE_US_GOV_CLOUD``
* ``AZURE_GERMAN_CLOUD``
Example Pillar for Azure Resource Manager authentication:
.. code-block:: yaml
azurearm:
user_pass_auth:
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
username: fletch
password: 123pass
mysubscription:
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
tenant: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
client_id: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
secret: XXXXXXXXXXXXXXXXXXXXXXXX
cloud_environment: AZURE_PUBLIC_CLOUD
Example states using Azure Resource Manager authentication:
.. code-block:: jinja
{% set profile = salt['pillar.get']('azurearm:mysubscription') %}
Ensure virtual network exists:
azurearm_network.virtual_network_present:
- name: my_vnet
- resource_group: my_rg
- address_prefixes:
- '10.0.0.0/8'
- '192.168.0.0/16'
- dns_servers:
- '8.8.8.8'
- tags:
how_awesome: very
contact_name: Elmer Fudd Gantry
- connection_auth: {{ profile }}
Ensure virtual network is absent:
azurearm_network.virtual_network_absent:
- name: other_vnet
- resource_group: my_rg
- connection_auth: {{ profile }}
'''
# Python libs
from __future__ import absolute_import
import logging
# Salt libs
try:
from salt.ext.six.moves import range as six_range
except ImportError:
six_range = range
__virtualname__ = 'azurearm_network'
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only make this state available if the azurearm_network module is available.
    '''
    if 'azurearm_network.check_ip_address_availability' in __salt__:
        return __virtualname__
    return False
def virtual_network_present(name, address_prefixes, resource_group, dns_servers=None,
                            tags=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a virtual network exists.

    :param name:
        Name of the virtual network.

    :param address_prefixes:
        A list of CIDR blocks which can be used by subnets within the virtual network.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param dns_servers:
        A list of DNS server addresses.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the virtual network object.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Additional keyword arguments (e.g. ``enable_ddos_protection``,
    ``enable_vm_protection``) are forwarded to
    ``azurearm_network.virtual_network_create_or_update``.

    Example usage:

    .. code-block:: yaml

        Ensure virtual network exists:
            azurearm_network.virtual_network_present:
                - name: vnet1
                - resource_group: group1
                - address_prefixes:
                    - '10.0.0.0/8'
                    - '192.168.0.0/16'
                - dns_servers:
                    - '8.8.8.8'
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    # Standard Salt state return structure.
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    # Authentication parameters must be supplied explicitly; there is no
    # fallback to config or environment here.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Look up the current state of the virtual network. A dict containing an
    # 'error' key means the vnet does not exist (or the lookup failed).
    vnet = __salt__['azurearm_network.virtual_network_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in vnet:
        # The vnet exists -- build up ret['changes'] with every difference
        # between the desired and the current configuration.
        tag_changes = __utils__['dictdiffer.deep_diff'](vnet.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        # DNS servers are compared as unordered sets.
        dns_changes = set(dns_servers or []).symmetric_difference(
            set(vnet.get('dhcp_options', {}).get('dns_servers', [])))
        if dns_changes:
            ret['changes']['dns_servers'] = {
                'old': vnet.get('dhcp_options', {}).get('dns_servers', []),
                'new': dns_servers,
            }

        # Address prefixes are likewise compared as unordered sets.
        addr_changes = set(address_prefixes or []).symmetric_difference(
            set(vnet.get('address_space', {}).get('address_prefixes', [])))
        if addr_changes:
            ret['changes']['address_space'] = {
                'address_prefixes': {
                    'old': vnet.get('address_space', {}).get('address_prefixes', []),
                    'new': address_prefixes,
                }
            }

        # Optional protection flags arrive via **kwargs and default to False
        # when not provided by the state.
        if kwargs.get('enable_ddos_protection', False) != vnet.get('enable_ddos_protection'):
            ret['changes']['enable_ddos_protection'] = {
                'old': vnet.get('enable_ddos_protection'),
                'new': kwargs.get('enable_ddos_protection')
            }

        if kwargs.get('enable_vm_protection', False) != vnet.get('enable_vm_protection'):
            ret['changes']['enable_vm_protection'] = {
                'old': vnet.get('enable_vm_protection'),
                'new': kwargs.get('enable_vm_protection')
            }

        # Nothing differs: report success without calling the API again.
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Virtual network {0} is already present.'.format(name)
            return ret

        # test=True with pending changes: report what would be updated.
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Virtual network {0} would be updated.'.format(name)
            return ret

    else:
        # The vnet does not exist yet -- everything desired is "new".
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'resource_group': resource_group,
                'address_space': {'address_prefixes': address_prefixes},
                'dhcp_options': {'dns_servers': dns_servers},
                'enable_ddos_protection': kwargs.get('enable_ddos_protection', False),
                'enable_vm_protection': kwargs.get('enable_vm_protection', False),
                'tags': tags,
            }
        }

    # test=True on the creation path (only reached when the vnet is absent,
    # since the update path returned above).
    if __opts__['test']:
        ret['comment'] = 'Virtual network {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Merge auth into the pass-through kwargs for the execution module call.
    vnet_kwargs = kwargs.copy()
    vnet_kwargs.update(connection_auth)

    # create_or_update handles both the create and the update case.
    vnet = __salt__['azurearm_network.virtual_network_create_or_update'](
        name=name,
        resource_group=resource_group,
        address_prefixes=address_prefixes,
        dns_servers=dns_servers,
        tags=tags,
        **vnet_kwargs
    )

    if 'error' not in vnet:
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create virtual network {0}! ({1})'.format(name, vnet.get('error'))
    return ret
def subnet_present(name, address_prefix, virtual_network, resource_group,
                   security_group=None, route_table=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a subnet exists within a virtual network, optionally attached to a
    network security group and/or a route table.

    :param name:
        Name of the subnet.

    :param address_prefix:
        A CIDR block used by the subnet within the virtual network.

    :param virtual_network:
        Name of the existing virtual network to contain the subnet.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param security_group:
        The name of the existing network security group to assign to the subnet.

    :param route_table:
        The name of the existing route table to assign to the subnet.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in
        connecting to the Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure subnet exists:
            azurearm_network.subnet_present:
                - name: vnet1_sn1
                - virtual_network: vnet1
                - resource_group: group1
                - address_prefix: '192.168.1.0/24'
                - security_group: nsg1
                - route_table: rt1
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure virtual network exists
                  - azurearm_network: Ensure network security group exists
                  - azurearm_network: Ensure route table exists
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Authentication details are mandatory.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    def _last_segment(resource):
        # Azure resource IDs are '/'-delimited paths; the final segment is
        # the resource name.
        return resource['id'].split('/')[-1]

    # Query the current subnet; an 'error' key means it does not exist.
    existing = __salt__['azurearm_network.subnet_get'](
        name,
        virtual_network,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in existing:
        # Subnet exists -- collect any differences from the desired state.
        if address_prefix != existing.get('address_prefix'):
            ret['changes']['address_prefix'] = {
                'old': existing.get('address_prefix'),
                'new': address_prefix
            }

        current_nsg = None
        if existing.get('network_security_group'):
            current_nsg = _last_segment(existing['network_security_group'])
        if security_group and (security_group != current_nsg):
            ret['changes']['network_security_group'] = {
                'old': current_nsg,
                'new': security_group
            }

        current_rt = None
        if existing.get('route_table'):
            current_rt = _last_segment(existing['route_table'])
        if route_table and (route_table != current_rt):
            ret['changes']['route_table'] = {
                'old': current_rt,
                'new': route_table
            }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Subnet {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Subnet {0} would be updated.'.format(name)
            return ret
    else:
        # Subnet absent -- everything desired counts as new.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'address_prefix': address_prefix,
                'network_security_group': security_group,
                'route_table': route_table
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Subnet {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Fold auth into pass-through kwargs and apply the change.
    create_kwargs = kwargs.copy()
    create_kwargs.update(connection_auth)

    result = __salt__['azurearm_network.subnet_create_or_update'](
        name=name,
        virtual_network=virtual_network,
        resource_group=resource_group,
        address_prefix=address_prefix,
        network_security_group=security_group,
        route_table=route_table,
        **create_kwargs
    )

    if 'error' not in result:
        ret['result'] = True
        ret['comment'] = 'Subnet {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create subnet {0}! ({1})'.format(name, result.get('error'))
    return ret
def subnet_absent(name, virtual_network, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a subnet does not exist within the given virtual network.

    :param name:
        Name of the subnet.

    :param virtual_network:
        Name of the existing virtual network containing the subnet.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in
        connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Authentication details are mandatory.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.subnet_get'](
        name,
        virtual_network,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # An 'error' key means the subnet is already gone -- nothing to do.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Subnet {0} was not found.'.format(name)
        return ret

    # In test mode, report the pending deletion without performing it.
    if __opts__['test']:
        ret['comment'] = 'Subnet {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {
            'old': existing,
            'new': {},
        }
        return ret

    deleted = __salt__['azurearm_network.subnet_delete'](name, virtual_network, resource_group, **connection_auth)

    if deleted:
        ret['result'] = True
        ret['comment'] = 'Subnet {0} has been deleted.'.format(name)
        ret['changes'] = {
            'old': existing,
            'new': {}
        }
        return ret

    ret['comment'] = 'Failed to delete subnet {0}!'.format(name)
    return ret
def network_security_group_present(name, resource_group, tags=None, security_rules=None, connection_auth=None,
                                   **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network security group exists.

    :param name:
        Name of the network security group.

    :param resource_group:
        The resource group assigned to the network security group.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the network security group object.

    :param security_rules: An optional list of dictionaries representing valid SecurityRule objects. See the
        documentation for the security_rule_present state or security_rule_create_or_update execution module
        for more information on required and optional parameters for security rules. The rules are only
        managed if this parameter is present. When this parameter is absent, implemented rules will not be removed,
        and will merely become unmanaged.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure network security group exists:
            azurearm_network.network_security_group_present:
                - name: nsg1
                - resource_group: group1
                - security_rules:
                  - name: nsg1_rule1
                    priority: 100
                    protocol: tcp
                    access: allow
                    direction: outbound
                    source_address_prefix: virtualnetwork
                    destination_address_prefix: internet
                    source_port_range: '*'
                    destination_port_range: '*'
                  - name: nsg1_rule2
                    priority: 101
                    protocol: tcp
                    access: allow
                    direction: inbound
                    source_address_prefix: internet
                    destination_address_prefix: virtualnetwork
                    source_port_range: '*'
                    destination_port_ranges:
                      - '80'
                      - '443'
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    # Standard Salt state return structure.
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    # Authentication parameters must be supplied explicitly.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Look up the current NSG; an 'error' key means it does not exist.
    nsg = __salt__['azurearm_network.network_security_group_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in nsg:
        # NSG exists -- diff tags and (optionally) the managed rule set.
        tag_changes = __utils__['dictdiffer.deep_diff'](nsg.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        # Rules are only compared when the caller supplied a list; an absent
        # parameter leaves existing rules unmanaged (see docstring).
        if security_rules:
            comp_ret = __utils__['azurearm.compare_list_of_dicts'](nsg.get('security_rules', []), security_rules)

            # A 'comment' from the comparison helper indicates invalid input
            # (e.g. a rule dict missing its 'name'); fail fast.
            if comp_ret.get('comment'):
                ret['comment'] = '"security_rules" {0}'.format(comp_ret['comment'])
                return ret

            if comp_ret.get('changes'):
                ret['changes']['security_rules'] = comp_ret['changes']

        # Nothing differs: report success without calling the API again.
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Network security group {0} is already present.'.format(name)
            return ret

        # test=True with pending changes: report what would be updated.
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Network security group {0} would be updated.'.format(name)
            return ret

    else:
        # The NSG does not exist yet -- everything desired is "new".
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'resource_group': resource_group,
                'tags': tags,
                'security_rules': security_rules,
            }
        }

    # test=True on the creation path (only reached when the NSG is absent).
    if __opts__['test']:
        ret['comment'] = 'Network security group {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Merge auth into the pass-through kwargs for the execution module call.
    nsg_kwargs = kwargs.copy()
    nsg_kwargs.update(connection_auth)

    nsg = __salt__['azurearm_network.network_security_group_create_or_update'](
        name=name,
        resource_group=resource_group,
        tags=tags,
        security_rules=security_rules,
        **nsg_kwargs
    )

    if 'error' not in nsg:
        ret['result'] = True
        ret['comment'] = 'Network security group {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create network security group {0}! ({1})'.format(name, nsg.get('error'))
    return ret
def network_security_group_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network security group does not exist in the resource group.

    :param name:
        Name of the network security group.

    :param resource_group:
        The resource group assigned to the network security group.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in
        connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Authentication details are mandatory.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.network_security_group_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # An 'error' key means the NSG is already gone -- nothing to do.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Network security group {0} was not found.'.format(name)
        return ret

    # In test mode, report the pending deletion without performing it.
    if __opts__['test']:
        ret['comment'] = 'Network security group {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {
            'old': existing,
            'new': {},
        }
        return ret

    deleted = __salt__['azurearm_network.network_security_group_delete'](name, resource_group, **connection_auth)

    if deleted:
        ret['result'] = True
        ret['comment'] = 'Network security group {0} has been deleted.'.format(name)
        ret['changes'] = {
            'old': existing,
            'new': {}
        }
        return ret

    ret['comment'] = 'Failed to delete network security group {0}!'.format(name)
    return ret
def security_rule_present(name, access, direction, priority, protocol, security_group, resource_group,
                          destination_address_prefix=None, destination_port_range=None, source_address_prefix=None,
                          source_port_range=None, description=None, destination_address_prefixes=None,
                          destination_port_ranges=None, source_address_prefixes=None, source_port_ranges=None,
                          connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a security rule exists.

    :param name:
        Name of the security rule.

    :param access:
        'allow' or 'deny'

    :param direction:
        'inbound' or 'outbound'

    :param priority:
        Integer between 100 and 4096 used for ordering rule application.

    :param protocol:
        'tcp', 'udp', or '*'

    :param security_group:
        The name of the existing network security group to contain the security rule.

    :param resource_group:
        The resource group assigned to the network security group.

    :param description:
        Optional description of the security rule.

    :param destination_address_prefix:
        The CIDR or destination IP range. Asterisk '*' can also be used to match all destination IPs.
        Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.

    :param destination_port_range:
        The destination port or range. Integer or range between 0 and 65535. Asterisk '*'
        can also be used to match all ports.

    :param source_address_prefix:
        The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs.
        Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
        If this is an ingress rule, specifies where network traffic originates from.

    :param source_port_range:
        The source port or range. Integer or range between 0 and 65535. Asterisk '*'
        can also be used to match all ports.

    :param destination_address_prefixes:
        A list of destination_address_prefix values. This parameter overrides destination_address_prefix
        and will cause any value entered there to be ignored.

    :param destination_port_ranges:
        A list of destination_port_range values. This parameter overrides destination_port_range
        and will cause any value entered there to be ignored.

    :param source_address_prefixes:
        A list of source_address_prefix values. This parameter overrides source_address_prefix
        and will cause any value entered there to be ignored.

    :param source_port_ranges:
        A list of source_port_range values. This parameter overrides source_port_range
        and will cause any value entered there to be ignored.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure security rule exists:
            azurearm_network.security_rule_present:
                - name: nsg1_rule2
                - security_group: nsg1
                - resource_group: group1
                - priority: 101
                - protocol: tcp
                - access: allow
                - direction: inbound
                - source_address_prefix: internet
                - destination_address_prefix: virtualnetwork
                - source_port_range: '*'
                - destination_port_ranges:
                    - '80'
                    - '443'
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure network security group exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Validate the mutually-exclusive singular/plural parameter pairs. The
    # previous implementation used eval()/exec() here, but in Python 3 exec()
    # cannot rebind a function's local variables, so the singular parameter
    # was never actually cleared and its stale value leaked into the
    # create_or_update call below. Track the values in a dict instead so the
    # plural form reliably overrides the singular one, as documented.
    params = {
        'source_port_ranges': source_port_ranges,
        'source_port_range': source_port_range,
        'source_address_prefixes': source_address_prefixes,
        'source_address_prefix': source_address_prefix,
        'destination_port_ranges': destination_port_ranges,
        'destination_port_range': destination_port_range,
        'destination_address_prefixes': destination_address_prefixes,
        'destination_address_prefix': destination_address_prefix,
    }

    exclusive_params = [
        ('source_port_ranges', 'source_port_range'),
        ('source_address_prefixes', 'source_address_prefix'),
        ('destination_port_ranges', 'destination_port_range'),
        ('destination_address_prefixes', 'destination_address_prefix'),
    ]

    for plural, singular in exclusive_params:
        # At least one form of each pair is required.
        if not params[plural] and not params[singular]:
            ret['comment'] = 'Either the {0} or {1} parameter must be provided!'.format(plural, singular)
            return ret
        if params[plural]:
            if not isinstance(params[plural], list):
                ret['comment'] = 'The {0} parameter must be a list!'.format(plural)
                return ret
            # The plural parameter overrides the singular one.
            params[singular] = None

    # Rebind the (possibly cleared) singular values for use below.
    source_port_range = params['source_port_range']
    source_address_prefix = params['source_address_prefix']
    destination_port_range = params['destination_port_range']
    destination_address_prefix = params['destination_address_prefix']

    # Look up the current rule; an 'error' key means it does not exist.
    rule = __salt__['azurearm_network.security_rule_get'](
        name,
        security_group,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in rule:
        # access changes
        if access.capitalize() != rule.get('access'):
            ret['changes']['access'] = {
                'old': rule.get('access'),
                'new': access
            }

        # description changes
        if description != rule.get('description'):
            ret['changes']['description'] = {
                'old': rule.get('description'),
                'new': description
            }

        # direction changes
        if direction.capitalize() != rule.get('direction'):
            ret['changes']['direction'] = {
                'old': rule.get('direction'),
                'new': direction
            }

        # priority changes
        if int(priority) != rule.get('priority'):
            ret['changes']['priority'] = {
                'old': rule.get('priority'),
                'new': priority
            }

        # protocol changes (case-insensitive compare)
        if protocol.lower() != (rule.get('protocol') or '').lower():
            ret['changes']['protocol'] = {
                'old': rule.get('protocol'),
                'new': protocol
            }

        # destination_port_range changes
        if destination_port_range != rule.get('destination_port_range'):
            ret['changes']['destination_port_range'] = {
                'old': rule.get('destination_port_range'),
                'new': destination_port_range
            }

        # source_port_range changes
        if source_port_range != rule.get('source_port_range'):
            ret['changes']['source_port_range'] = {
                'old': rule.get('source_port_range'),
                'new': source_port_range
            }

        # destination_port_ranges changes (order-insensitive)
        if sorted(destination_port_ranges or []) != sorted(rule.get('destination_port_ranges', [])):
            ret['changes']['destination_port_ranges'] = {
                'old': rule.get('destination_port_ranges'),
                'new': destination_port_ranges
            }

        # source_port_ranges changes (order-insensitive)
        if sorted(source_port_ranges or []) != sorted(rule.get('source_port_ranges', [])):
            ret['changes']['source_port_ranges'] = {
                'old': rule.get('source_port_ranges'),
                'new': source_port_ranges
            }

        # destination_address_prefix changes (case-insensitive; guard the
        # remote value against None before lowercasing)
        if (destination_address_prefix or '').lower() != (rule.get('destination_address_prefix') or '').lower():
            ret['changes']['destination_address_prefix'] = {
                'old': rule.get('destination_address_prefix'),
                'new': destination_address_prefix
            }

        # source_address_prefix changes (case-insensitive; guard the remote
        # value against None before lowercasing)
        if (source_address_prefix or '').lower() != (rule.get('source_address_prefix') or '').lower():
            ret['changes']['source_address_prefix'] = {
                'old': rule.get('source_address_prefix'),
                'new': source_address_prefix
            }

        # destination_address_prefixes changes (order-insensitive, then a
        # case-insensitive element-wise compare of the sorted lists)
        if sorted(destination_address_prefixes or []) != sorted(rule.get('destination_address_prefixes', [])):
            if len(destination_address_prefixes or []) != len(rule.get('destination_address_prefixes', [])):
                ret['changes']['destination_address_prefixes'] = {
                    'old': rule.get('destination_address_prefixes'),
                    'new': destination_address_prefixes
                }
            else:
                # Same length: only flag a change if some element differs
                # beyond case.
                local_dst_addrs = sorted(destination_address_prefixes)
                remote_dst_addrs = sorted(rule.get('destination_address_prefixes'))
                for local_addr, remote_addr in zip(local_dst_addrs, remote_dst_addrs):
                    if local_addr.lower() != remote_addr.lower():
                        ret['changes']['destination_address_prefixes'] = {
                            'old': rule.get('destination_address_prefixes'),
                            'new': destination_address_prefixes
                        }
                        break

        # source_address_prefixes changes (same logic as destination)
        if sorted(source_address_prefixes or []) != sorted(rule.get('source_address_prefixes', [])):
            if len(source_address_prefixes or []) != len(rule.get('source_address_prefixes', [])):
                ret['changes']['source_address_prefixes'] = {
                    'old': rule.get('source_address_prefixes'),
                    'new': source_address_prefixes
                }
            else:
                local_src_addrs = sorted(source_address_prefixes)
                remote_src_addrs = sorted(rule.get('source_address_prefixes'))
                for local_addr, remote_addr in zip(local_src_addrs, remote_src_addrs):
                    if local_addr.lower() != remote_addr.lower():
                        ret['changes']['source_address_prefixes'] = {
                            'old': rule.get('source_address_prefixes'),
                            'new': source_address_prefixes
                        }
                        break

        # Nothing differs: report success without calling the API again.
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Security rule {0} is already present.'.format(name)
            return ret

        # test=True with pending changes: report what would be updated.
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Security rule {0} would be updated.'.format(name)
            return ret

    else:
        # The rule does not exist yet -- everything desired is "new".
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'access': access,
                'description': description,
                'direction': direction,
                'priority': priority,
                'protocol': protocol,
                'destination_address_prefix': destination_address_prefix,
                'destination_address_prefixes': destination_address_prefixes,
                'destination_port_range': destination_port_range,
                'destination_port_ranges': destination_port_ranges,
                'source_address_prefix': source_address_prefix,
                'source_address_prefixes': source_address_prefixes,
                'source_port_range': source_port_range,
                'source_port_ranges': source_port_ranges,
            }
        }

    # test=True on the creation path (only reached when the rule is absent).
    if __opts__['test']:
        ret['comment'] = 'Security rule {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Merge auth into the pass-through kwargs for the execution module call.
    rule_kwargs = kwargs.copy()
    rule_kwargs.update(connection_auth)

    rule = __salt__['azurearm_network.security_rule_create_or_update'](
        name=name,
        access=access,
        description=description,
        direction=direction,
        priority=priority,
        protocol=protocol,
        security_group=security_group,
        resource_group=resource_group,
        destination_address_prefix=destination_address_prefix,
        destination_address_prefixes=destination_address_prefixes,
        destination_port_range=destination_port_range,
        destination_port_ranges=destination_port_ranges,
        source_address_prefix=source_address_prefix,
        source_address_prefixes=source_address_prefixes,
        source_port_range=source_port_range,
        source_port_ranges=source_port_ranges,
        **rule_kwargs
    )

    if 'error' not in rule:
        ret['result'] = True
        ret['comment'] = 'Security rule {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create security rule {0}! ({1})'.format(name, rule.get('error'))
    return ret
def security_rule_absent(name, security_group, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a security rule does not exist in the network security group.

    :param name:
        Name of the security rule.

    :param security_group:
        The network security group containing the security rule.

    :param resource_group:
        The resource group assigned to the network security group.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in
        connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Authentication details are mandatory.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.security_rule_get'](
        name,
        security_group,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # An 'error' key means the rule is already gone -- nothing to do.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Security rule {0} was not found.'.format(name)
        return ret

    # In test mode, report the pending deletion without performing it.
    if __opts__['test']:
        ret['comment'] = 'Security rule {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {
            'old': existing,
            'new': {},
        }
        return ret

    deleted = __salt__['azurearm_network.security_rule_delete'](name, security_group, resource_group, **connection_auth)

    if deleted:
        ret['result'] = True
        ret['comment'] = 'Security rule {0} has been deleted.'.format(name)
        ret['changes'] = {
            'old': existing,
            'new': {}
        }
        return ret

    ret['comment'] = 'Failed to delete security rule {0}!'.format(name)
    return ret
def load_balancer_present(name, resource_group, sku=None, frontend_ip_configurations=None, backend_address_pools=None,
                          load_balancing_rules=None, probes=None, inbound_nat_rules=None, inbound_nat_pools=None,
                          outbound_nat_rules=None, tags=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a load balancer exists.

    :param name:
        Name of the load balancer.

    :param resource_group:
        The resource group assigned to the load balancer.

    :param sku:
        The load balancer SKU, which can be 'Basic' or 'Standard'.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the load balancer object.

    :param frontend_ip_configurations:
        An optional list of dictionaries representing valid FrontendIPConfiguration objects. A frontend IP
        configuration can be either private (using private IP address and subnet parameters) or public (using a
        reference to a public IP address object). Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``private_ip_address``: The private IP address of the IP configuration. Required if
          'private_ip_allocation_method' is 'Static'.
        - ``private_ip_allocation_method``: The Private IP allocation method. Possible values are: 'Static' and
          'Dynamic'.
        - ``subnet``: Name of an existing subnet inside of which the frontend IP will reside.
        - ``public_ip_address``: Name of an existing public IP address which will be assigned to the frontend IP object.

    :param backend_address_pools:
        An optional list of dictionaries representing valid BackendAddressPool objects. Only the 'name' parameter is
        valid for a BackendAddressPool dictionary. All other parameters are read-only references from other objects
        linking to the backend address pool. Inbound traffic is randomly load balanced across IPs in the backend IPs.

    :param probes:
        An optional list of dictionaries representing valid Probe objects. Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``protocol``: The protocol of the endpoint. Possible values are 'Http' or 'Tcp'. If 'Tcp' is specified, a
          received ACK is required for the probe to be successful. If 'Http' is specified, a 200 OK response from the
          specified URI is required for the probe to be successful.
        - ``port``: The port for communicating the probe. Possible values range from 1 to 65535, inclusive.
        - ``interval_in_seconds``: The interval, in seconds, for how frequently to probe the endpoint for health status.
          Typically, the interval is slightly less than half the allocated timeout period (in seconds) which allows two
          full probes before taking the instance out of rotation. The default value is 15, the minimum value is 5.
        - ``number_of_probes``: The number of probes where if no response, will result in stopping further traffic from
          being delivered to the endpoint. This values allows endpoints to be taken out of rotation faster or slower
          than the typical times used in Azure.
        - ``request_path``: The URI used for requesting health status from the VM. Path is required if a protocol is
          set to 'Http'. Otherwise, it is not allowed. There is no default value.

    :param load_balancing_rules:
        An optional list of dictionaries representing valid LoadBalancingRule objects. Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``load_distribution``: The load distribution policy for this rule. Possible values are 'Default', 'SourceIP',
          and 'SourceIPProtocol'.
        - ``frontend_port``: The port for the external endpoint. Port numbers for each rule must be unique within the
          Load Balancer. Acceptable values are between 0 and 65534. Note that value 0 enables 'Any Port'.
        - ``backend_port``: The port used for internal connections on the endpoint. Acceptable values are between 0 and
          65535. Note that value 0 enables 'Any Port'.
        - ``idle_timeout_in_minutes``: The timeout for the TCP idle connection. The value can be set between 4 and 30
          minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
        - ``enable_floating_ip``: Configures a virtual machine's endpoint for the floating IP capability required
          to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn
          Availability Groups in SQL server. This setting can't be changed after you create the endpoint.
        - ``disable_outbound_snat``: Configures SNAT for the VMs in the backend pool to use the public IP address
          specified in the frontend of the load balancing rule.
        - ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the load balancing rule
          object.
        - ``backend_address_pool``: Name of the backend address pool object used by the load balancing rule object.
          Inbound traffic is randomly load balanced across IPs in the backend IPs.
        - ``probe``: Name of the probe object used by the load balancing rule object.

    :param inbound_nat_rules:
        An optional list of dictionaries representing valid InboundNatRule objects. Defining inbound NAT rules on your
        load balancer is mutually exclusive with defining an inbound NAT pool. Inbound NAT pools are referenced from
        virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an
        Inbound NAT pool. They have to reference individual inbound NAT rules. Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the inbound NAT rule
          object.
        - ``protocol``: Possible values include 'Udp', 'Tcp', or 'All'.
        - ``frontend_port``: The port for the external endpoint. Port numbers for each rule must be unique within the
          Load Balancer. Acceptable values range from 1 to 65534.
        - ``backend_port``: The port used for the internal endpoint. Acceptable values range from 1 to 65535.
        - ``idle_timeout_in_minutes``: The timeout for the TCP idle connection. The value can be set between 4 and 30
          minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
        - ``enable_floating_ip``: Configures a virtual machine's endpoint for the floating IP capability required
          to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn
          Availability Groups in SQL server. This setting can't be changed after you create the endpoint.

    :param inbound_nat_pools:
        An optional list of dictionaries representing valid InboundNatPool objects. They define an external port range
        for inbound NAT to a single backend port on NICs associated with a load balancer. Inbound NAT rules are created
        automatically for each NIC associated with the Load Balancer using an external port from this range. Defining an
        Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound NAT rules. Inbound NAT pools
        are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot
        reference an inbound NAT pool. They have to reference individual inbound NAT rules. Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the inbound NAT pool
          object.
        - ``protocol``: Possible values include 'Udp', 'Tcp', or 'All'.
        - ``frontend_port_range_start``: The first port number in the range of external ports that will be used to
          provide Inbound NAT to NICs associated with a load balancer. Acceptable values range between 1 and 65534.
        - ``frontend_port_range_end``: The last port number in the range of external ports that will be used to
          provide Inbound NAT to NICs associated with a load balancer. Acceptable values range between 1 and 65535.
        - ``backend_port``: The port used for internal connections to the endpoint. Acceptable values are between 1 and
          65535.

    :param outbound_nat_rules:
        An optional list of dictionaries representing valid OutboundNatRule objects. Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the outbound NAT rule
          object.
        - ``backend_address_pool``: Name of the backend address pool object used by the outbound NAT rule object.
          Outbound traffic is randomly load balanced across IPs in the backend IPs.
        - ``allocated_outbound_ports``: The number of outbound ports to be used for NAT.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure load balancer exists:
            azurearm_network.load_balancer_present:
                - name: lb1
                - resource_group: group1
                - location: eastus
                - frontend_ip_configurations:
                  - name: lb1_feip1
                    public_ip_address: pub_ip1
                - backend_address_pools:
                  - name: lb1_bepool1
                - probes:
                  - name: lb1_webprobe1
                    protocol: tcp
                    port: 80
                    interval_in_seconds: 5
                    number_of_probes: 2
                - load_balancing_rules:
                  - name: lb1_webprobe1
                    protocol: tcp
                    frontend_port: 80
                    backend_port: 80
                    idle_timeout_in_minutes: 4
                    frontend_ip_configuration: lb1_feip1
                    backend_address_pool: lb1_bepool1
                    probe: lb1_webprobe1
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
                  - azurearm_network: Ensure public IP exists
    '''
    # Standard Salt state return structure.
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    # The Azure SDK expects the SKU as a dict with a capitalized 'name' value.
    if sku:
        sku = {'name': sku.capitalize()}
    load_bal = __salt__['azurearm_network.load_balancer_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )
    if 'error' not in load_bal:
        # The load balancer already exists: diff each managed property against
        # the desired state, accumulating differences into ret['changes'].
        # tag changes
        tag_changes = __utils__['dictdiffer.deep_diff'](load_bal.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes
        # sku changes
        if sku:
            sku_changes = __utils__['dictdiffer.deep_diff'](load_bal.get('sku', {}), sku)
            if sku_changes:
                ret['changes']['sku'] = sku_changes
        # frontend_ip_configurations changes
        # compare_list_of_dicts returns 'comment' on invalid input (which aborts
        # the state) and 'changes' when the lists differ.
        if frontend_ip_configurations:
            comp_ret = __utils__['azurearm.compare_list_of_dicts'](
                load_bal.get('frontend_ip_configurations', []),
                frontend_ip_configurations,
                ['public_ip_address', 'subnet']
            )
            if comp_ret.get('comment'):
                ret['comment'] = '"frontend_ip_configurations" {0}'.format(comp_ret['comment'])
                return ret
            if comp_ret.get('changes'):
                ret['changes']['frontend_ip_configurations'] = comp_ret['changes']
        # backend_address_pools changes
        if backend_address_pools:
            comp_ret = __utils__['azurearm.compare_list_of_dicts'](
                load_bal.get('backend_address_pools', []),
                backend_address_pools
            )
            if comp_ret.get('comment'):
                ret['comment'] = '"backend_address_pools" {0}'.format(comp_ret['comment'])
                return ret
            if comp_ret.get('changes'):
                ret['changes']['backend_address_pools'] = comp_ret['changes']
        # probes changes
        if probes:
            comp_ret = __utils__['azurearm.compare_list_of_dicts'](load_bal.get('probes', []), probes)
            if comp_ret.get('comment'):
                ret['comment'] = '"probes" {0}'.format(comp_ret['comment'])
                return ret
            if comp_ret.get('changes'):
                ret['changes']['probes'] = comp_ret['changes']
        # load_balancing_rules changes
        if load_balancing_rules:
            comp_ret = __utils__['azurearm.compare_list_of_dicts'](
                load_bal.get('load_balancing_rules', []),
                load_balancing_rules,
                ['frontend_ip_configuration', 'backend_address_pool', 'probe']
            )
            if comp_ret.get('comment'):
                ret['comment'] = '"load_balancing_rules" {0}'.format(comp_ret['comment'])
                return ret
            if comp_ret.get('changes'):
                ret['changes']['load_balancing_rules'] = comp_ret['changes']
        # inbound_nat_rules changes
        if inbound_nat_rules:
            comp_ret = __utils__['azurearm.compare_list_of_dicts'](
                load_bal.get('inbound_nat_rules', []),
                inbound_nat_rules,
                ['frontend_ip_configuration']
            )
            if comp_ret.get('comment'):
                ret['comment'] = '"inbound_nat_rules" {0}'.format(comp_ret['comment'])
                return ret
            if comp_ret.get('changes'):
                ret['changes']['inbound_nat_rules'] = comp_ret['changes']
        # inbound_nat_pools changes
        if inbound_nat_pools:
            comp_ret = __utils__['azurearm.compare_list_of_dicts'](
                load_bal.get('inbound_nat_pools', []),
                inbound_nat_pools,
                ['frontend_ip_configuration']
            )
            if comp_ret.get('comment'):
                ret['comment'] = '"inbound_nat_pools" {0}'.format(comp_ret['comment'])
                return ret
            if comp_ret.get('changes'):
                ret['changes']['inbound_nat_pools'] = comp_ret['changes']
        # outbound_nat_rules changes
        if outbound_nat_rules:
            comp_ret = __utils__['azurearm.compare_list_of_dicts'](
                load_bal.get('outbound_nat_rules', []),
                outbound_nat_rules,
                ['frontend_ip_configuration']
            )
            if comp_ret.get('comment'):
                ret['comment'] = '"outbound_nat_rules" {0}'.format(comp_ret['comment'])
                return ret
            if comp_ret.get('changes'):
                ret['changes']['outbound_nat_rules'] = comp_ret['changes']
        # No differences: the state is already satisfied.
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Load balancer {0} is already present.'.format(name)
            return ret
        # Dry run: report pending update without touching Azure.
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Load balancer {0} would be updated.'.format(name)
            return ret
    else:
        # The load balancer does not exist yet; everything supplied is new.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'sku': sku,
                'tags': tags,
                'frontend_ip_configurations': frontend_ip_configurations,
                'backend_address_pools': backend_address_pools,
                'load_balancing_rules': load_balancing_rules,
                'probes': probes,
                'inbound_nat_rules': inbound_nat_rules,
                'inbound_nat_pools': inbound_nat_pools,
                'outbound_nat_rules': outbound_nat_rules,
            }
        }
        if __opts__['test']:
            ret['comment'] = 'Load balancer {0} would be created.'.format(name)
            ret['result'] = None
            return ret
    # Create or update: merge any extra state args with the auth parameters and
    # hand everything to the execution module.
    lb_kwargs = kwargs.copy()
    lb_kwargs.update(connection_auth)
    load_bal = __salt__['azurearm_network.load_balancer_create_or_update'](
        name=name,
        resource_group=resource_group,
        sku=sku,
        tags=tags,
        frontend_ip_configurations=frontend_ip_configurations,
        backend_address_pools=backend_address_pools,
        load_balancing_rules=load_balancing_rules,
        probes=probes,
        inbound_nat_rules=inbound_nat_rules,
        inbound_nat_pools=inbound_nat_pools,
        outbound_nat_rules=outbound_nat_rules,
        **lb_kwargs
    )
    if 'error' not in load_bal:
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} has been created.'.format(name)
        return ret
    ret['comment'] = 'Failed to create load balancer {0}! ({1})'.format(name, load_bal.get('error'))
    return ret
def load_balancer_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a load balancer does not exist in the resource group.

    :param name:
        Name of the load balancer.

    :param resource_group:
        The resource group assigned to the load balancer.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    load_bal = __salt__['azurearm_network.load_balancer_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the load balancer is already absent.
    if 'error' in load_bal:
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} was not found.'.format(name)
        return ret

    # Dry run: report the pending deletion without touching Azure.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Load balancer {0} would be deleted.'.format(name)
        ret['changes'] = {'old': load_bal, 'new': {}}
        return ret

    if __salt__['azurearm_network.load_balancer_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} has been deleted.'.format(name)
        ret['changes'] = {'old': load_bal, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete load balancer {0}!'.format(name)
    return ret
def public_ip_address_present(name, resource_group, tags=None, sku=None, public_ip_allocation_method=None,
                              public_ip_address_version=None, dns_settings=None, idle_timeout_in_minutes=None,
                              connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a public IP address exists.

    :param name:
        Name of the public IP address.

    :param resource_group:
        The resource group assigned to the public IP address.

    :param dns_settings:
        An optional dictionary representing a valid PublicIPAddressDnsSettings object. Parameters include
        'domain_name_label' and 'reverse_fqdn', which accept strings. The 'domain_name_label' parameter is concatenated
        with the regionalized DNS zone make up the fully qualified domain name associated with the public IP address.
        If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS
        system. The 'reverse_fqdn' parameter is a user-visible, fully qualified domain name that resolves to this public
        IP address. If the reverse FQDN is specified, then a PTR DNS record is created pointing from the IP address in
        the in-addr.arpa domain to the reverse FQDN.

    :param sku:
        The public IP address SKU, which can be 'Basic' or 'Standard'.

    :param public_ip_allocation_method:
        The public IP allocation method. Possible values are: 'Static' and 'Dynamic'.

    :param public_ip_address_version:
        The public IP address version. Possible values are: 'IPv4' and 'IPv6'.

    :param idle_timeout_in_minutes:
        An integer representing the idle timeout of the public IP address.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the public IP address object.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure public IP exists:
            azurearm_network.public_ip_address_present:
                - name: pub_ip1
                - resource_group: group1
                - dns_settings:
                    domain_name_label: decisionlab-ext-test-label
                - sku: basic
                - public_ip_allocation_method: static
                - public_ip_address_version: ipv4
                - idle_timeout_in_minutes: 4
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # The Azure SDK expects the SKU as a dict with a capitalized 'name' value.
    if sku:
        sku = {'name': sku.capitalize()}

    existing = __salt__['azurearm_network.public_ip_address_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in existing:
        # The address already exists: diff each managed property against the
        # desired state and collect the differences.
        tag_diff = __utils__['dictdiffer.deep_diff'](existing.get('tags', {}), tags or {})
        if tag_diff:
            ret['changes']['tags'] = tag_diff

        if dns_settings:
            if not isinstance(dns_settings, dict):
                ret['comment'] = 'DNS settings must be provided as a dictionary!'
                return ret
            current_dns = existing.get('dns_settings', {})
            if any(current_dns.get(key) != value for key, value in dns_settings.items()):
                ret['changes']['dns_settings'] = {
                    'old': existing.get('dns_settings'),
                    'new': dns_settings
                }

        if sku:
            sku_diff = __utils__['dictdiffer.deep_diff'](existing.get('sku', {}), sku)
            if sku_diff:
                ret['changes']['sku'] = sku_diff

        if public_ip_allocation_method:
            if public_ip_allocation_method.capitalize() != existing.get('public_ip_allocation_method'):
                ret['changes']['public_ip_allocation_method'] = {
                    'old': existing.get('public_ip_allocation_method'),
                    'new': public_ip_allocation_method
                }

        if public_ip_address_version:
            # Compare case-insensitively since Azure returns e.g. 'IPv4'.
            if public_ip_address_version.lower() != existing.get('public_ip_address_version', '').lower():
                ret['changes']['public_ip_address_version'] = {
                    'old': existing.get('public_ip_address_version'),
                    'new': public_ip_address_version
                }

        if idle_timeout_in_minutes and (int(idle_timeout_in_minutes) != existing.get('idle_timeout_in_minutes')):
            ret['changes']['idle_timeout_in_minutes'] = {
                'old': existing.get('idle_timeout_in_minutes'),
                'new': idle_timeout_in_minutes
            }

        # Nothing differs -- state already satisfied.
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Public IP address {0} is already present.'.format(name)
            return ret

        # Dry run: report pending update without touching Azure.
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Public IP address {0} would be updated.'.format(name)
            return ret
    else:
        # The address does not exist yet; every supplied parameter is new.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'tags': tags,
                'dns_settings': dns_settings,
                'sku': sku,
                'public_ip_allocation_method': public_ip_allocation_method,
                'public_ip_address_version': public_ip_address_version,
                'idle_timeout_in_minutes': idle_timeout_in_minutes,
            }
        }

        if __opts__['test']:
            ret['comment'] = 'Public IP address {0} would be created.'.format(name)
            ret['result'] = None
            return ret

    # Merge extra state args with auth parameters and apply the change.
    pub_ip_kwargs = kwargs.copy()
    pub_ip_kwargs.update(connection_auth)

    result = __salt__['azurearm_network.public_ip_address_create_or_update'](
        name=name,
        resource_group=resource_group,
        sku=sku,
        tags=tags,
        dns_settings=dns_settings,
        public_ip_allocation_method=public_ip_allocation_method,
        public_ip_address_version=public_ip_address_version,
        idle_timeout_in_minutes=idle_timeout_in_minutes,
        **pub_ip_kwargs
    )

    if 'error' not in result:
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create public IP address {0}! ({1})'.format(name, result.get('error'))
    return ret
def public_ip_address_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a public IP address does not exist in the resource group.

    :param name:
        Name of the public IP address.

    :param resource_group:
        The resource group assigned to the public IP address.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    pub_ip = __salt__['azurearm_network.public_ip_address_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the address is already absent.
    if 'error' in pub_ip:
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} was not found.'.format(name)
        return ret

    # Dry run: report the pending deletion without touching Azure.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Public IP address {0} would be deleted.'.format(name)
        ret['changes'] = {'old': pub_ip, 'new': {}}
        return ret

    deleted = __salt__['azurearm_network.public_ip_address_delete'](name, resource_group, **connection_auth)
    if not deleted:
        ret['comment'] = 'Failed to delete public IP address {0}!'.format(name)
        return ret

    ret['result'] = True
    ret['comment'] = 'Public IP address {0} has been deleted.'.format(name)
    ret['changes'] = {'old': pub_ip, 'new': {}}
    return ret
def network_interface_present(name, ip_configurations, subnet, virtual_network, resource_group, tags=None,
                              virtual_machine=None, network_security_group=None, dns_settings=None, mac_address=None,
                              primary=None, enable_accelerated_networking=None, enable_ip_forwarding=None,
                              connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network interface exists.

    :param name:
        Name of the network interface.

    :param ip_configurations:
        A list of dictionaries representing valid NetworkInterfaceIPConfiguration objects. The 'name' key is required at
        minimum. At least one IP Configuration must be present.

    :param subnet:
        Name of the existing subnet assigned to the network interface.

    :param virtual_network:
        Name of the existing virtual network containing the subnet.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the network interface object.

    :param network_security_group:
        The name of the existing network security group to assign to the network interface.

    :param virtual_machine:
        The name of the existing virtual machine to assign to the network interface.

    :param dns_settings:
        An optional dictionary representing a valid NetworkInterfaceDnsSettings object. Valid parameters are:

        - ``dns_servers``: List of DNS server IP addresses. Use 'AzureProvidedDNS' to switch to Azure provided DNS
          resolution. 'AzureProvidedDNS' value cannot be combined with other IPs, it must be the only value in
          dns_servers collection.
        - ``internal_dns_name_label``: Relative DNS name for this NIC used for internal communications between VMs in
          the same virtual network.
        - ``internal_fqdn``: Fully qualified DNS name supporting internal communications between VMs in the same virtual
          network.
        - ``internal_domain_name_suffix``: Even if internal_dns_name_label is not specified, a DNS entry is created for
          the primary NIC of the VM. This DNS name can be constructed by concatenating the VM name with the value of
          internal_domain_name_suffix.

    :param mac_address:
        Optional string containing the MAC address of the network interface.

    :param primary:
        Optional boolean allowing the interface to be set as the primary network interface on a virtual machine
        with multiple interfaces attached.

    :param enable_accelerated_networking:
        Optional boolean indicating whether accelerated networking should be enabled for the interface.

    :param enable_ip_forwarding:
        Optional boolean indicating whether IP forwarding should be enabled for the interface.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure network interface exists:
            azurearm_network.network_interface_present:
                - name: iface1
                - subnet: vnet1_sn1
                - virtual_network: vnet1
                - resource_group: group1
                - ip_configurations:
                  - name: iface1_ipc1
                    public_ip_address: pub_ip2
                - dns_settings:
                    internal_dns_name_label: decisionlab-int-test-label
                - primary: True
                - enable_accelerated_networking: True
                - enable_ip_forwarding: False
                - network_security_group: nsg1
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure subnet exists
                  - azurearm_network: Ensure network security group exists
                  - azurearm_network: Ensure another public IP exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    iface = __salt__['azurearm_network.network_interface_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )
    if 'error' not in iface:
        # The interface already exists: diff each managed property against the
        # desired state and collect the differences in ret['changes'].
        # tag changes
        tag_changes = __utils__['dictdiffer.deep_diff'](iface.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes
        # mac_address changes
        if mac_address and (mac_address != iface.get('mac_address')):
            ret['changes']['mac_address'] = {
                'old': iface.get('mac_address'),
                'new': mac_address
            }
        # primary changes
        if primary is not None:
            if primary != iface.get('primary', True):
                ret['changes']['primary'] = {
                    'old': iface.get('primary'),
                    'new': primary
                }
        # enable_accelerated_networking changes
        if enable_accelerated_networking is not None:
            if enable_accelerated_networking != iface.get('enable_accelerated_networking'):
                ret['changes']['enable_accelerated_networking'] = {
                    'old': iface.get('enable_accelerated_networking'),
                    'new': enable_accelerated_networking
                }
        # enable_ip_forwarding changes
        if enable_ip_forwarding is not None:
            if enable_ip_forwarding != iface.get('enable_ip_forwarding'):
                ret['changes']['enable_ip_forwarding'] = {
                    'old': iface.get('enable_ip_forwarding'),
                    'new': enable_ip_forwarding
                }
        # network_security_group changes
        # Only the resource name (last segment of the resource ID) is compared.
        nsg_name = None
        if iface.get('network_security_group'):
            nsg_name = iface['network_security_group']['id'].split('/')[-1]
        if network_security_group and (network_security_group != nsg_name):
            ret['changes']['network_security_group'] = {
                'old': nsg_name,
                'new': network_security_group
            }
        # virtual_machine changes
        vm_name = None
        if iface.get('virtual_machine'):
            vm_name = iface['virtual_machine']['id'].split('/')[-1]
        if virtual_machine and (virtual_machine != vm_name):
            ret['changes']['virtual_machine'] = {
                'old': vm_name,
                'new': virtual_machine
            }
        # dns_settings changes
        if dns_settings:
            if not isinstance(dns_settings, dict):
                ret['comment'] = 'DNS settings must be provided as a dictionary!'
                return ret
            for key in dns_settings:
                new_val = dns_settings[key]
                old_val = iface.get('dns_settings', {}).get(key, '')
                try:
                    # String settings (e.g. internal_dns_name_label) are compared
                    # case-insensitively.
                    changed = new_val.lower() != old_val.lower()
                except AttributeError:
                    # BUGFIX: non-string settings (e.g. the 'dns_servers' list)
                    # have no .lower() and previously raised AttributeError here.
                    # Compare them directly instead.
                    changed = new_val != old_val
                if changed:
                    ret['changes']['dns_settings'] = {
                        'old': iface.get('dns_settings'),
                        'new': dns_settings
                    }
                    break
        # ip_configurations changes
        comp_ret = __utils__['azurearm.compare_list_of_dicts'](
            iface.get('ip_configurations', []),
            ip_configurations,
            ['public_ip_address', 'subnet']
        )
        if comp_ret.get('comment'):
            ret['comment'] = '"ip_configurations" {0}'.format(comp_ret['comment'])
            return ret
        if comp_ret.get('changes'):
            ret['changes']['ip_configurations'] = comp_ret['changes']
        # Nothing differs -- the state is already satisfied.
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Network interface {0} is already present.'.format(name)
            return ret
        # Dry run: report the pending update without touching Azure.
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Network interface {0} would be updated.'.format(name)
            return ret
    else:
        # The interface does not exist yet; everything supplied is new.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'ip_configurations': ip_configurations,
                'dns_settings': dns_settings,
                'network_security_group': network_security_group,
                'virtual_machine': virtual_machine,
                'enable_accelerated_networking': enable_accelerated_networking,
                'enable_ip_forwarding': enable_ip_forwarding,
                'mac_address': mac_address,
                'primary': primary,
                'tags': tags,
            }
        }
        if __opts__['test']:
            ret['comment'] = 'Network interface {0} would be created.'.format(name)
            ret['result'] = None
            return ret
    # Merge extra state args with auth parameters and apply the change.
    iface_kwargs = kwargs.copy()
    iface_kwargs.update(connection_auth)
    iface = __salt__['azurearm_network.network_interface_create_or_update'](
        name=name,
        subnet=subnet,
        virtual_network=virtual_network,
        resource_group=resource_group,
        ip_configurations=ip_configurations,
        dns_settings=dns_settings,
        enable_accelerated_networking=enable_accelerated_networking,
        enable_ip_forwarding=enable_ip_forwarding,
        mac_address=mac_address,
        primary=primary,
        network_security_group=network_security_group,
        virtual_machine=virtual_machine,
        tags=tags,
        **iface_kwargs
    )
    if 'error' not in iface:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} has been created.'.format(name)
        return ret
    ret['comment'] = 'Failed to create network interface {0}! ({1})'.format(name, iface.get('error'))
    return ret
def network_interface_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network interface does not exist in the resource group.

    :param name:
        Name of the network interface.

    :param resource_group:
        The resource group assigned to the network interface.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    iface = __salt__['azurearm_network.network_interface_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )
    if 'error' in iface:
        # A lookup error means the interface is already absent.
        ret['result'] = True
        ret['comment'] = 'Network interface {0} was not found.'.format(name)
        return ret
    elif __opts__['test']:
        # Dry run: report the pending deletion without touching Azure.
        ret['comment'] = 'Network interface {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {
            'old': iface,
            'new': {},
        }
        return ret
    deleted = __salt__['azurearm_network.network_interface_delete'](name, resource_group, **connection_auth)
    if deleted:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} has been deleted.'.format(name)
        ret['changes'] = {
            'old': iface,
            'new': {}
        }
        return ret
    # BUGFIX: removed a stray ')' that previously trailed the '!' in this message.
    ret['comment'] = 'Failed to delete network interface {0}!'.format(name)
    return ret
def route_table_present(name, resource_group, tags=None, routes=None, disable_bgp_route_propagation=None,
                        connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route table exists.

    :param name:
        Name of the route table.

    :param resource_group:
        The resource group assigned to the route table.

    :param routes:
        An optional list of dictionaries representing valid Route objects contained within a route table. See the
        documentation for the route_present state or route_create_or_update execution module for more information on
        required and optional parameters for routes. The routes are only managed if this parameter is present. When this
        parameter is absent, implemented routes will not be removed, and will merely become unmanaged.

    :param disable_bgp_route_propagation:
        An optional boolean parameter setting whether to disable the routes learned by BGP on the route table.
        Pass ``False`` explicitly to re-enable propagation on an existing table.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the route table object.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure route table exists:
            azurearm_network.route_table_present:
                - name: rt1
                - resource_group: group1
                - routes:
                  - name: rt1_route1
                    address_prefix: '0.0.0.0/0'
                    next_hop_type: internet
                  - name: rt1_route2
                    address_prefix: '192.168.0.0/16'
                    next_hop_type: vnetlocal
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    rt_tbl = __salt__['azurearm_network.route_table_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in rt_tbl:
        # The table exists - diff each managed property against the desired state.

        # tag changes
        tag_changes = __utils__['dictdiffer.deep_diff'](rt_tbl.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        # disable_bgp_route_propagation changes
        # Bug fix: compare against None instead of truthiness, so an explicit
        # ``False`` (re-enable propagation) is detected as a change. The old
        # ``if disable_bgp_route_propagation and ...`` test silently ignored False.
        # pylint: disable=line-too-long
        if disable_bgp_route_propagation is not None and (disable_bgp_route_propagation != rt_tbl.get('disable_bgp_route_propagation')):
            ret['changes']['disable_bgp_route_propagation'] = {
                'old': rt_tbl.get('disable_bgp_route_propagation'),
                'new': disable_bgp_route_propagation
            }

        # routes changes (only managed when the parameter is supplied)
        if routes:
            comp_ret = __utils__['azurearm.compare_list_of_dicts'](rt_tbl.get('routes', []), routes)

            if comp_ret.get('comment'):
                ret['comment'] = '"routes" {0}'.format(comp_ret['comment'])
                return ret

            if comp_ret.get('changes'):
                ret['changes']['routes'] = comp_ret['changes']

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Route table {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Route table {0} would be updated.'.format(name)
            return ret

    else:
        # The table does not exist yet; everything desired is a "new" change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'tags': tags,
                'routes': routes,
                'disable_bgp_route_propagation': disable_bgp_route_propagation,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Route table {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Forward extra kwargs plus the auth parameters to the execution module.
    rt_tbl_kwargs = kwargs.copy()
    rt_tbl_kwargs.update(connection_auth)

    rt_tbl = __salt__['azurearm_network.route_table_create_or_update'](
        name=name,
        resource_group=resource_group,
        disable_bgp_route_propagation=disable_bgp_route_propagation,
        routes=routes,
        tags=tags,
        **rt_tbl_kwargs
    )

    if 'error' not in rt_tbl:
        ret['result'] = True
        ret['comment'] = 'Route table {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create route table {0}! ({1})'.format(name, rt_tbl.get('error'))
    return ret
def route_table_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route table does not exist in the resource group.

    :param name:
        Name of the route table.

    :param resource_group:
        The resource group assigned to the route table.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = dict(name=name, result=False, comment='', changes={})

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.route_table_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the table is already gone.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Route table {0} was not found.'.format(name)
        return ret

    # Test mode: describe the pending deletion without performing it.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Route table {0} would be deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.route_table_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Route table {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete route table {0}!'.format(name)
    return ret
def route_present(name, address_prefix, next_hop_type, route_table, resource_group, next_hop_ip_address=None,
                  connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route exists within a route table.

    :param name:
        Name of the route.

    :param address_prefix:
        The destination CIDR to which the route applies.

    :param next_hop_type:
        The type of Azure hop the packet should be sent to. Possible values are: 'VirtualNetworkGateway', 'VnetLocal',
        'Internet', 'VirtualAppliance', and 'None'.

    :param next_hop_ip_address:
        The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop
        type is 'VirtualAppliance'.

    :param route_table:
        The name of the existing route table which will contain the route.

    :param resource_group:
        The resource group assigned to the route table.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure route exists:
            azurearm_network.route_present:
                - name: rt1_route2
                - route_table: rt1
                - resource_group: group1
                - address_prefix: '192.168.0.0/16'
                - next_hop_type: vnetlocal
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure route table exists
    '''
    # Standard Salt state return structure.
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    route = __salt__['azurearm_network.route_get'](
        name,
        route_table,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in route:
        # Route exists - diff each managed property against the desired state.
        if address_prefix != route.get('address_prefix'):
            ret['changes']['address_prefix'] = {
                'old': route.get('address_prefix'),
                'new': address_prefix
            }

        # Azure returns the hop type in mixed case, so compare case-insensitively.
        if next_hop_type.lower() != route.get('next_hop_type', '').lower():
            ret['changes']['next_hop_type'] = {
                'old': route.get('next_hop_type'),
                'new': next_hop_type
            }

        # next_hop_ip_address is only meaningful for the 'VirtualAppliance' hop type.
        if next_hop_type.lower() == 'virtualappliance' and next_hop_ip_address != route.get('next_hop_ip_address'):
            ret['changes']['next_hop_ip_address'] = {
                'old': route.get('next_hop_ip_address'),
                'new': next_hop_ip_address
            }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Route {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Route {0} would be updated.'.format(name)
            return ret

    else:
        # Route does not exist yet; everything desired is a "new" change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'address_prefix': address_prefix,
                'next_hop_type': next_hop_type,
                'next_hop_ip_address': next_hop_ip_address
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Route {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Forward extra kwargs plus the auth parameters to the execution module.
    route_kwargs = kwargs.copy()
    route_kwargs.update(connection_auth)

    route = __salt__['azurearm_network.route_create_or_update'](
        name=name,
        route_table=route_table,
        resource_group=resource_group,
        address_prefix=address_prefix,
        next_hop_type=next_hop_type,
        next_hop_ip_address=next_hop_ip_address,
        **route_kwargs
    )

    if 'error' not in route:
        ret['result'] = True
        ret['comment'] = 'Route {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create route {0}! ({1})'.format(name, route.get('error'))
    return ret
def route_absent(name, route_table, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route does not exist in the route table.

    :param name:
        Name of the route.

    :param route_table:
        The name of the existing route table containing the route.

    :param resource_group:
        The resource group assigned to the route table.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = dict(name=name, result=False, comment='', changes={})

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.route_get'](
        name,
        route_table,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the route is already gone.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Route {0} was not found.'.format(name)
        return ret

    # Test mode: describe the pending deletion without performing it.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Route {0} would be deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.route_delete'](name, route_table, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Route {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete route {0}!'.format(name)
    return ret
|
saltstack/salt
|
salt/states/azurearm_network.py
|
subnet_present
|
python
|
def subnet_present(name, address_prefix, virtual_network, resource_group,
                   security_group=None, route_table=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a subnet exists.

    :param name:
        Name of the subnet.

    :param address_prefix:
        A CIDR block used by the subnet within the virtual network.

    :param virtual_network:
        Name of the existing virtual network to contain the subnet.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param security_group:
        The name of the existing network security group to assign to the subnet.

    :param route_table:
        The name of the existing route table to assign to the subnet.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure subnet exists:
            azurearm_network.subnet_present:
                - name: vnet1_sn1
                - virtual_network: vnet1
                - resource_group: group1
                - address_prefix: '192.168.1.0/24'
                - security_group: nsg1
                - route_table: rt1
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure virtual network exists
                  - azurearm_network: Ensure network security group exists
                  - azurearm_network: Ensure route table exists
    '''
    # Standard Salt state return structure.
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    snet = __salt__['azurearm_network.subnet_get'](
        name,
        virtual_network,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in snet:
        # Subnet exists - diff each managed property against the desired state.
        if address_prefix != snet.get('address_prefix'):
            ret['changes']['address_prefix'] = {
                'old': snet.get('address_prefix'),
                'new': address_prefix
            }

        # Azure returns the NSG as a full resource ID; the last path segment is its name.
        nsg_name = None
        if snet.get('network_security_group'):
            nsg_name = snet['network_security_group']['id'].split('/')[-1]

        if security_group and (security_group != nsg_name):
            ret['changes']['network_security_group'] = {
                'old': nsg_name,
                'new': security_group
            }

        # Same resource-ID-to-name extraction for the attached route table.
        rttbl_name = None
        if snet.get('route_table'):
            rttbl_name = snet['route_table']['id'].split('/')[-1]

        if route_table and (route_table != rttbl_name):
            ret['changes']['route_table'] = {
                'old': rttbl_name,
                'new': route_table
            }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Subnet {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Subnet {0} would be updated.'.format(name)
            return ret

    else:
        # Subnet does not exist yet; everything desired is a "new" change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'address_prefix': address_prefix,
                'network_security_group': security_group,
                'route_table': route_table
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Subnet {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Forward extra kwargs plus the auth parameters to the execution module.
    snet_kwargs = kwargs.copy()
    snet_kwargs.update(connection_auth)

    snet = __salt__['azurearm_network.subnet_create_or_update'](
        name=name,
        virtual_network=virtual_network,
        resource_group=resource_group,
        address_prefix=address_prefix,
        network_security_group=security_group,
        route_table=route_table,
        **snet_kwargs
    )

    if 'error' not in snet:
        ret['result'] = True
        ret['comment'] = 'Subnet {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create subnet {0}! ({1})'.format(name, snet.get('error'))
    return ret
|
.. versionadded:: 2019.2.0
Ensure a subnet exists.
:param name:
Name of the subnet.
:param address_prefix:
A CIDR block used by the subnet within the virtual network.
:param virtual_network:
Name of the existing virtual network to contain the subnet.
:param resource_group:
The resource group assigned to the virtual network.
:param security_group:
The name of the existing network security group to assign to the subnet.
:param route_table:
The name of the existing route table to assign to the subnet.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure subnet exists:
azurearm_network.subnet_present:
- name: vnet1_sn1
- virtual_network: vnet1
- resource_group: group1
- address_prefix: '192.168.1.0/24'
- security_group: nsg1
- route_table: rt1
- connection_auth: {{ profile }}
- require:
- azurearm_network: Ensure virtual network exists
- azurearm_network: Ensure network security group exists
- azurearm_network: Ensure route table exists
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/azurearm_network.py#L324-L463
| null |
# -*- coding: utf-8 -*-
'''
Azure (ARM) Network State Module
.. versionadded:: 2019.2.0
:maintainer: <devops@decisionlab.io>
:maturity: new
:depends:
* `azure <https://pypi.python.org/pypi/azure>`_ >= 2.0.0
* `azure-common <https://pypi.python.org/pypi/azure-common>`_ >= 1.1.8
* `azure-mgmt <https://pypi.python.org/pypi/azure-mgmt>`_ >= 1.0.0
* `azure-mgmt-compute <https://pypi.python.org/pypi/azure-mgmt-compute>`_ >= 1.0.0
* `azure-mgmt-network <https://pypi.python.org/pypi/azure-mgmt-network>`_ >= 1.7.1
* `azure-mgmt-resource <https://pypi.python.org/pypi/azure-mgmt-resource>`_ >= 1.1.0
* `azure-mgmt-storage <https://pypi.python.org/pypi/azure-mgmt-storage>`_ >= 1.0.0
* `azure-mgmt-web <https://pypi.python.org/pypi/azure-mgmt-web>`_ >= 0.32.0
* `azure-storage <https://pypi.python.org/pypi/azure-storage>`_ >= 0.34.3
* `msrestazure <https://pypi.python.org/pypi/msrestazure>`_ >= 0.4.21
:platform: linux
:configuration: This module requires Azure Resource Manager credentials to be passed as a dictionary of
keyword arguments to the ``connection_auth`` parameter in order to work properly. Since the authentication
parameters are sensitive, it's recommended to pass them to the states via pillar.
Required provider parameters:
if using username and password:
* ``subscription_id``
* ``username``
* ``password``
if using a service principal:
* ``subscription_id``
* ``tenant``
* ``client_id``
* ``secret``
Optional provider parameters:
**cloud_environment**: Used to point the cloud driver to different API endpoints, such as Azure GovCloud. Possible values:
* ``AZURE_PUBLIC_CLOUD`` (default)
* ``AZURE_CHINA_CLOUD``
* ``AZURE_US_GOV_CLOUD``
* ``AZURE_GERMAN_CLOUD``
Example Pillar for Azure Resource Manager authentication:
.. code-block:: yaml
azurearm:
user_pass_auth:
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
username: fletch
password: 123pass
mysubscription:
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
tenant: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
client_id: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
secret: XXXXXXXXXXXXXXXXXXXXXXXX
cloud_environment: AZURE_PUBLIC_CLOUD
Example states using Azure Resource Manager authentication:
.. code-block:: jinja
{% set profile = salt['pillar.get']('azurearm:mysubscription') %}
Ensure virtual network exists:
azurearm_network.virtual_network_present:
- name: my_vnet
- resource_group: my_rg
- address_prefixes:
- '10.0.0.0/8'
- '192.168.0.0/16'
- dns_servers:
- '8.8.8.8'
- tags:
how_awesome: very
contact_name: Elmer Fudd Gantry
- connection_auth: {{ profile }}
Ensure virtual network is absent:
azurearm_network.virtual_network_absent:
- name: other_vnet
- resource_group: my_rg
- connection_auth: {{ profile }}
'''
# Python libs
from __future__ import absolute_import
import logging
# Salt libs
try:
from salt.ext.six.moves import range as six_range
except ImportError:
six_range = range
__virtualname__ = 'azurearm_network'
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Load this state module only when the azurearm_network execution module is loaded.
    '''
    if 'azurearm_network.check_ip_address_availability' in __salt__:
        return __virtualname__
    return False
def virtual_network_present(name, address_prefixes, resource_group, dns_servers=None,
                            tags=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a virtual network exists.

    :param name:
        Name of the virtual network.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param address_prefixes:
        A list of CIDR blocks which can be used by subnets within the virtual network.

    :param dns_servers:
        A list of DNS server addresses.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the virtual network object.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure virtual network exists:
            azurearm_network.virtual_network_present:
                - name: vnet1
                - resource_group: group1
                - address_prefixes:
                    - '10.0.0.0/8'
                    - '192.168.0.0/16'
                - dns_servers:
                    - '8.8.8.8'
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    # Standard Salt state return structure.
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    vnet = __salt__['azurearm_network.virtual_network_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in vnet:
        # Virtual network exists - diff each managed property against the desired state.
        tag_changes = __utils__['dictdiffer.deep_diff'](vnet.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        # Symmetric difference flags servers present on one side but not the other.
        dns_changes = set(dns_servers or []).symmetric_difference(
            set(vnet.get('dhcp_options', {}).get('dns_servers', [])))
        if dns_changes:
            ret['changes']['dns_servers'] = {
                'old': vnet.get('dhcp_options', {}).get('dns_servers', []),
                'new': dns_servers,
            }

        # Same order-insensitive comparison for the CIDR prefixes.
        addr_changes = set(address_prefixes or []).symmetric_difference(
            set(vnet.get('address_space', {}).get('address_prefixes', [])))
        if addr_changes:
            ret['changes']['address_space'] = {
                'address_prefixes': {
                    'old': vnet.get('address_space', {}).get('address_prefixes', []),
                    'new': address_prefixes,
                }
            }

        # The protection flags are only passed via kwargs; absent means False.
        if kwargs.get('enable_ddos_protection', False) != vnet.get('enable_ddos_protection'):
            ret['changes']['enable_ddos_protection'] = {
                'old': vnet.get('enable_ddos_protection'),
                'new': kwargs.get('enable_ddos_protection')
            }

        if kwargs.get('enable_vm_protection', False) != vnet.get('enable_vm_protection'):
            ret['changes']['enable_vm_protection'] = {
                'old': vnet.get('enable_vm_protection'),
                'new': kwargs.get('enable_vm_protection')
            }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Virtual network {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Virtual network {0} would be updated.'.format(name)
            return ret

    else:
        # Virtual network does not exist yet; everything desired is a "new" change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'resource_group': resource_group,
                'address_space': {'address_prefixes': address_prefixes},
                'dhcp_options': {'dns_servers': dns_servers},
                'enable_ddos_protection': kwargs.get('enable_ddos_protection', False),
                'enable_vm_protection': kwargs.get('enable_vm_protection', False),
                'tags': tags,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Virtual network {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Forward extra kwargs plus the auth parameters to the execution module.
    vnet_kwargs = kwargs.copy()
    vnet_kwargs.update(connection_auth)

    vnet = __salt__['azurearm_network.virtual_network_create_or_update'](
        name=name,
        resource_group=resource_group,
        address_prefixes=address_prefixes,
        dns_servers=dns_servers,
        tags=tags,
        **vnet_kwargs
    )

    if 'error' not in vnet:
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create virtual network {0}! ({1})'.format(name, vnet.get('error'))
    return ret
def virtual_network_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a virtual network does not exist in the resource group.

    :param name:
        Name of the virtual network.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = dict(name=name, result=False, comment='', changes={})

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.virtual_network_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the virtual network is already gone.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} was not found.'.format(name)
        return ret

    # Test mode: describe the pending deletion without performing it.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Virtual network {0} would be deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.virtual_network_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete virtual network {0}!'.format(name)
    return ret
def subnet_absent(name, virtual_network, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a subnet does not exist in the virtual network.

    :param name:
        Name of the subnet.

    :param virtual_network:
        Name of the existing virtual network containing the subnet.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = dict(name=name, result=False, comment='', changes={})

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.subnet_get'](
        name,
        virtual_network,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the subnet is already gone.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Subnet {0} was not found.'.format(name)
        return ret

    # Test mode: describe the pending deletion without performing it.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Subnet {0} would be deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.subnet_delete'](name, virtual_network, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Subnet {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete subnet {0}!'.format(name)
    return ret
def network_security_group_present(name, resource_group, tags=None, security_rules=None, connection_auth=None,
                                   **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network security group exists.

    :param name:
        Name of the network security group.

    :param resource_group:
        The resource group assigned to the network security group.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the network security group object.

    :param security_rules: An optional list of dictionaries representing valid SecurityRule objects. See the
        documentation for the security_rule_present state or security_rule_create_or_update execution module
        for more information on required and optional parameters for security rules. The rules are only
        managed if this parameter is present. When this parameter is absent, implemented rules will not be removed,
        and will merely become unmanaged.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure network security group exists:
            azurearm_network.network_security_group_present:
                - name: nsg1
                - resource_group: group1
                - security_rules:
                  - name: nsg1_rule1
                    priority: 100
                    protocol: tcp
                    access: allow
                    direction: outbound
                    source_address_prefix: virtualnetwork
                    destination_address_prefix: internet
                    source_port_range: '*'
                    destination_port_range: '*'
                  - name: nsg1_rule2
                    priority: 101
                    protocol: tcp
                    access: allow
                    direction: inbound
                    source_address_prefix: internet
                    destination_address_prefix: virtualnetwork
                    source_port_range: '*'
                    destination_port_ranges:
                      - '80'
                      - '443'
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    # Standard Salt state return structure.
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    nsg = __salt__['azurearm_network.network_security_group_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in nsg:
        # NSG exists - diff each managed property against the desired state.
        tag_changes = __utils__['dictdiffer.deep_diff'](nsg.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        # Security rules are only managed when the parameter is supplied.
        if security_rules:
            comp_ret = __utils__['azurearm.compare_list_of_dicts'](nsg.get('security_rules', []), security_rules)

            if comp_ret.get('comment'):
                ret['comment'] = '"security_rules" {0}'.format(comp_ret['comment'])
                return ret

            if comp_ret.get('changes'):
                ret['changes']['security_rules'] = comp_ret['changes']

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Network security group {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Network security group {0} would be updated.'.format(name)
            return ret

    else:
        # NSG does not exist yet; everything desired is a "new" change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'resource_group': resource_group,
                'tags': tags,
                'security_rules': security_rules,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Network security group {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Forward extra kwargs plus the auth parameters to the execution module.
    nsg_kwargs = kwargs.copy()
    nsg_kwargs.update(connection_auth)

    nsg = __salt__['azurearm_network.network_security_group_create_or_update'](
        name=name,
        resource_group=resource_group,
        tags=tags,
        security_rules=security_rules,
        **nsg_kwargs
    )

    if 'error' not in nsg:
        ret['result'] = True
        ret['comment'] = 'Network security group {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create network security group {0}! ({1})'.format(name, nsg.get('error'))
    return ret
def network_security_group_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network security group does not exist in the resource group.

    :param name:
        Name of the network security group.

    :param resource_group:
        The resource group assigned to the network security group.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = dict(name=name, result=False, comment='', changes={})

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.network_security_group_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the NSG is already gone.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Network security group {0} was not found.'.format(name)
        return ret

    # Test mode: describe the pending deletion without performing it.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Network security group {0} would be deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.network_security_group_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Network security group {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete network security group {0}!'.format(name)
    return ret
def security_rule_present(name, access, direction, priority, protocol, security_group, resource_group,
                          destination_address_prefix=None, destination_port_range=None, source_address_prefix=None,
                          source_port_range=None, description=None, destination_address_prefixes=None,
                          destination_port_ranges=None, source_address_prefixes=None, source_port_ranges=None,
                          connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a security rule exists.

    :param name:
        Name of the security rule.

    :param access:
        'allow' or 'deny'

    :param direction:
        'inbound' or 'outbound'

    :param priority:
        Integer between 100 and 4096 used for ordering rule application.

    :param protocol:
        'tcp', 'udp', or '*'

    :param security_group:
        The name of the existing network security group to contain the security rule.

    :param resource_group:
        The resource group assigned to the network security group.

    :param description:
        Optional description of the security rule.

    :param destination_address_prefix:
        The CIDR or destination IP range. Asterix '*' can also be used to match all destination IPs.
        Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
        If this is an ingress rule, specifies where network traffic originates from.

    :param destination_port_range:
        The destination port or range. Integer or range between 0 and 65535. Asterix '*'
        can also be used to match all ports.

    :param source_address_prefix:
        The CIDR or source IP range. Asterix '*' can also be used to match all source IPs.
        Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
        If this is an ingress rule, specifies where network traffic originates from.

    :param source_port_range:
        The source port or range. Integer or range between 0 and 65535. Asterix '*'
        can also be used to match all ports.

    :param destination_address_prefixes:
        A list of destination_address_prefix values. This parameter overrides destination_address_prefix
        and will cause any value entered there to be ignored.

    :param destination_port_ranges:
        A list of destination_port_range values. This parameter overrides destination_port_range
        and will cause any value entered there to be ignored.

    :param source_address_prefixes:
        A list of source_address_prefix values. This parameter overrides source_address_prefix
        and will cause any value entered there to be ignored.

    :param source_port_ranges:
        A list of source_port_range values. This parameter overrides source_port_range
        and will cause any value entered there to be ignored.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure security rule exists:
            azurearm_network.security_rule_present:
                - name: nsg1_rule2
                - security_group: nsg1
                - resource_group: group1
                - priority: 101
                - protocol: tcp
                - access: allow
                - direction: inbound
                - source_address_prefix: internet
                - destination_address_prefix: virtualnetwork
                - source_port_range: '*'
                - destination_port_ranges:
                  - '80'
                  - '443'
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure network security group exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Each plural (list) parameter is mutually exclusive with -- and overrides --
    # its singular counterpart. Validate the pairs with explicit references
    # instead of the previous eval()/exec() approach: exec() cannot rebind
    # function locals in Python 3, so the exec-based override silently failed
    # to clear the singular parameter, which then leaked into change detection
    # and into the create/update call below.
    exclusive_params = [
        ('source_port_ranges', source_port_ranges, 'source_port_range', source_port_range),
        ('source_address_prefixes', source_address_prefixes, 'source_address_prefix', source_address_prefix),
        ('destination_port_ranges', destination_port_ranges, 'destination_port_range', destination_port_range),
        ('destination_address_prefixes', destination_address_prefixes, 'destination_address_prefix',
         destination_address_prefix),
    ]

    for plural_name, plural_value, singular_name, singular_value in exclusive_params:
        if not plural_value and not singular_value:
            ret['comment'] = 'Either the {0} or {1} parameter must be provided!'.format(plural_name, singular_name)
            return ret
        if plural_value and not isinstance(plural_value, list):
            ret['comment'] = 'The {0} parameter must be a list!'.format(plural_name)
            return ret

    # The list forms take precedence; ignore any singular values passed alongside them.
    if source_port_ranges:
        source_port_range = None
    if source_address_prefixes:
        source_address_prefix = None
    if destination_port_ranges:
        destination_port_range = None
    if destination_address_prefixes:
        destination_address_prefix = None

    rule = __salt__['azurearm_network.security_rule_get'](
        name,
        security_group,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in rule:
        # access changes
        if access.capitalize() != rule.get('access'):
            ret['changes']['access'] = {
                'old': rule.get('access'),
                'new': access
            }

        # description changes
        if description != rule.get('description'):
            ret['changes']['description'] = {
                'old': rule.get('description'),
                'new': description
            }

        # direction changes
        if direction.capitalize() != rule.get('direction'):
            ret['changes']['direction'] = {
                'old': rule.get('direction'),
                'new': direction
            }

        # priority changes
        if int(priority) != rule.get('priority'):
            ret['changes']['priority'] = {
                'old': rule.get('priority'),
                'new': priority
            }

        # protocol changes
        if protocol.lower() != rule.get('protocol', '').lower():
            ret['changes']['protocol'] = {
                'old': rule.get('protocol'),
                'new': protocol
            }

        # destination_port_range changes
        if destination_port_range != rule.get('destination_port_range'):
            ret['changes']['destination_port_range'] = {
                'old': rule.get('destination_port_range'),
                'new': destination_port_range
            }

        # source_port_range changes
        if source_port_range != rule.get('source_port_range'):
            ret['changes']['source_port_range'] = {
                'old': rule.get('source_port_range'),
                'new': source_port_range
            }

        # destination_port_ranges changes
        # 'or []' guards against the key being present with an explicit None value.
        if sorted(destination_port_ranges or []) != sorted(rule.get('destination_port_ranges') or []):
            ret['changes']['destination_port_ranges'] = {
                'old': rule.get('destination_port_ranges'),
                'new': destination_port_ranges
            }

        # source_port_ranges changes
        if sorted(source_port_ranges or []) != sorted(rule.get('source_port_ranges') or []):
            ret['changes']['source_port_ranges'] = {
                'old': rule.get('source_port_ranges'),
                'new': source_port_ranges
            }

        # destination_address_prefix changes (case-insensitive)
        if (destination_address_prefix or '').lower() != (rule.get('destination_address_prefix') or '').lower():
            ret['changes']['destination_address_prefix'] = {
                'old': rule.get('destination_address_prefix'),
                'new': destination_address_prefix
            }

        # source_address_prefix changes (case-insensitive)
        if (source_address_prefix or '').lower() != (rule.get('source_address_prefix') or '').lower():
            ret['changes']['source_address_prefix'] = {
                'old': rule.get('source_address_prefix'),
                'new': source_address_prefix
            }

        # destination_address_prefixes changes: a length difference is always a
        # change; otherwise compare the sorted lists element-wise, case-insensitively.
        if sorted(destination_address_prefixes or []) != sorted(rule.get('destination_address_prefixes') or []):
            if len(destination_address_prefixes or []) != len(rule.get('destination_address_prefixes') or []):
                ret['changes']['destination_address_prefixes'] = {
                    'old': rule.get('destination_address_prefixes'),
                    'new': destination_address_prefixes
                }
            else:
                local_dst_addrs, remote_dst_addrs = (sorted(destination_address_prefixes),
                                                     sorted(rule.get('destination_address_prefixes') or []))
                for idx in six_range(0, len(local_dst_addrs)):
                    if local_dst_addrs[idx].lower() != remote_dst_addrs[idx].lower():
                        ret['changes']['destination_address_prefixes'] = {
                            'old': rule.get('destination_address_prefixes'),
                            'new': destination_address_prefixes
                        }
                        break

        # source_address_prefixes changes: same comparison strategy as above.
        if sorted(source_address_prefixes or []) != sorted(rule.get('source_address_prefixes') or []):
            if len(source_address_prefixes or []) != len(rule.get('source_address_prefixes') or []):
                ret['changes']['source_address_prefixes'] = {
                    'old': rule.get('source_address_prefixes'),
                    'new': source_address_prefixes
                }
            else:
                local_src_addrs, remote_src_addrs = (sorted(source_address_prefixes),
                                                     sorted(rule.get('source_address_prefixes') or []))
                for idx in six_range(0, len(local_src_addrs)):
                    if local_src_addrs[idx].lower() != remote_src_addrs[idx].lower():
                        ret['changes']['source_address_prefixes'] = {
                            'old': rule.get('source_address_prefixes'),
                            'new': source_address_prefixes
                        }
                        break

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Security rule {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Security rule {0} would be updated.'.format(name)
            return ret

    else:
        # Rule not found: everything requested is a change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'access': access,
                'description': description,
                'direction': direction,
                'priority': priority,
                'protocol': protocol,
                'destination_address_prefix': destination_address_prefix,
                'destination_address_prefixes': destination_address_prefixes,
                'destination_port_range': destination_port_range,
                'destination_port_ranges': destination_port_ranges,
                'source_address_prefix': source_address_prefix,
                'source_address_prefixes': source_address_prefixes,
                'source_port_range': source_port_range,
                'source_port_ranges': source_port_ranges,
            }
        }

        if __opts__['test']:
            ret['comment'] = 'Security rule {0} would be created.'.format(name)
            ret['result'] = None
            return ret

    rule_kwargs = kwargs.copy()
    rule_kwargs.update(connection_auth)

    rule = __salt__['azurearm_network.security_rule_create_or_update'](
        name=name,
        access=access,
        description=description,
        direction=direction,
        priority=priority,
        protocol=protocol,
        security_group=security_group,
        resource_group=resource_group,
        destination_address_prefix=destination_address_prefix,
        destination_address_prefixes=destination_address_prefixes,
        destination_port_range=destination_port_range,
        destination_port_ranges=destination_port_ranges,
        source_address_prefix=source_address_prefix,
        source_address_prefixes=source_address_prefixes,
        source_port_range=source_port_range,
        source_port_ranges=source_port_ranges,
        **rule_kwargs
    )

    if 'error' not in rule:
        ret['result'] = True
        ret['comment'] = 'Security rule {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create security rule {0}! ({1})'.format(name, rule.get('error'))
    return ret
def security_rule_absent(name, security_group, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure the named security rule is removed from the network security group.

    :param name:
        Name of the security rule.

    :param security_group:
        The network security group containing the security rule.

    :param resource_group:
        The resource group assigned to the network security group.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.security_rule_get'](
        name,
        security_group,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' in existing:
        # Lookup failed, so the rule is already gone -- desired state holds.
        ret['result'] = True
        ret['comment'] = 'Security rule {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Security rule {0} would be deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.security_rule_delete'](name, security_group, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Security rule {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete security rule {0}!'.format(name)
    return ret
def load_balancer_present(name, resource_group, sku=None, frontend_ip_configurations=None, backend_address_pools=None,
                          load_balancing_rules=None, probes=None, inbound_nat_rules=None, inbound_nat_pools=None,
                          outbound_nat_rules=None, tags=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a load balancer exists.

    :param name:
        Name of the load balancer.

    :param resource_group:
        The resource group assigned to the load balancer.

    :param sku:
        The load balancer SKU, which can be 'Basic' or 'Standard'.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the load balancer object.

    :param frontend_ip_configurations:
        An optional list of dictionaries representing valid FrontendIPConfiguration objects. A frontend IP
        configuration can be either private (using private IP address and subnet parameters) or public (using a
        reference to a public IP address object). Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``private_ip_address``: The private IP address of the IP configuration. Required if
          'private_ip_allocation_method' is 'Static'.
        - ``private_ip_allocation_method``: The Private IP allocation method. Possible values are: 'Static' and
          'Dynamic'.
        - ``subnet``: Name of an existing subnet inside of which the frontend IP will reside.
        - ``public_ip_address``: Name of an existing public IP address which will be assigned to the frontend IP object.

    :param backend_address_pools:
        An optional list of dictionaries representing valid BackendAddressPool objects. Only the 'name' parameter is
        valid for a BackendAddressPool dictionary. All other parameters are read-only references from other objects
        linking to the backend address pool. Inbound traffic is randomly load balanced across IPs in the backend IPs.

    :param probes:
        An optional list of dictionaries representing valid Probe objects. Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``protocol``: The protocol of the endpoint. Possible values are 'Http' or 'Tcp'. If 'Tcp' is specified, a
          received ACK is required for the probe to be successful. If 'Http' is specified, a 200 OK response from the
          specified URI is required for the probe to be successful.
        - ``port``: The port for communicating the probe. Possible values range from 1 to 65535, inclusive.
        - ``interval_in_seconds``: The interval, in seconds, for how frequently to probe the endpoint for health status.
          Typically, the interval is slightly less than half the allocated timeout period (in seconds) which allows two
          full probes before taking the instance out of rotation. The default value is 15, the minimum value is 5.
        - ``number_of_probes``: The number of probes where if no response, will result in stopping further traffic from
          being delivered to the endpoint. This values allows endpoints to be taken out of rotation faster or slower
          than the typical times used in Azure.
        - ``request_path``: The URI used for requesting health status from the VM. Path is required if a protocol is
          set to 'Http'. Otherwise, it is not allowed. There is no default value.

    :param load_balancing_rules:
        An optional list of dictionaries representing valid LoadBalancingRule objects. Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``load_distribution``: The load distribution policy for this rule. Possible values are 'Default', 'SourceIP',
          and 'SourceIPProtocol'.
        - ``frontend_port``: The port for the external endpoint. Port numbers for each rule must be unique within the
          Load Balancer. Acceptable values are between 0 and 65534. Note that value 0 enables 'Any Port'.
        - ``backend_port``: The port used for internal connections on the endpoint. Acceptable values are between 0 and
          65535. Note that value 0 enables 'Any Port'.
        - ``idle_timeout_in_minutes``: The timeout for the TCP idle connection. The value can be set between 4 and 30
          minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
        - ``enable_floating_ip``: Configures a virtual machine's endpoint for the floating IP capability required
          to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn
          Availability Groups in SQL server. This setting can't be changed after you create the endpoint.
        - ``disable_outbound_snat``: Configures SNAT for the VMs in the backend pool to use the public IP address
          specified in the frontend of the load balancing rule.
        - ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the load balancing rule
          object.
        - ``backend_address_pool``: Name of the backend address pool object used by the load balancing rule object.
          Inbound traffic is randomly load balanced across IPs in the backend IPs.
        - ``probe``: Name of the probe object used by the load balancing rule object.

    :param inbound_nat_rules:
        An optional list of dictionaries representing valid InboundNatRule objects. Defining inbound NAT rules on your
        load balancer is mutually exclusive with defining an inbound NAT pool. Inbound NAT pools are referenced from
        virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an
        Inbound NAT pool. They have to reference individual inbound NAT rules. Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the inbound NAT rule
          object.
        - ``protocol``: Possible values include 'Udp', 'Tcp', or 'All'.
        - ``frontend_port``: The port for the external endpoint. Port numbers for each rule must be unique within the
          Load Balancer. Acceptable values range from 1 to 65534.
        - ``backend_port``: The port used for the internal endpoint. Acceptable values range from 1 to 65535.
        - ``idle_timeout_in_minutes``: The timeout for the TCP idle connection. The value can be set between 4 and 30
          minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
        - ``enable_floating_ip``: Configures a virtual machine's endpoint for the floating IP capability required
          to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn
          Availability Groups in SQL server. This setting can't be changed after you create the endpoint.

    :param inbound_nat_pools:
        An optional list of dictionaries representing valid InboundNatPool objects. They define an external port range
        for inbound NAT to a single backend port on NICs associated with a load balancer. Inbound NAT rules are created
        automatically for each NIC associated with the Load Balancer using an external port from this range. Defining an
        Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound NAT rules. Inbound NAT pools
        are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot
        reference an inbound NAT pool. They have to reference individual inbound NAT rules. Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the inbound NAT pool
          object.
        - ``protocol``: Possible values include 'Udp', 'Tcp', or 'All'.
        - ``frontend_port_range_start``: The first port number in the range of external ports that will be used to
          provide Inbound NAT to NICs associated with a load balancer. Acceptable values range between 1 and 65534.
        - ``frontend_port_range_end``: The last port number in the range of external ports that will be used to
          provide Inbound NAT to NICs associated with a load balancer. Acceptable values range between 1 and 65535.
        - ``backend_port``: The port used for internal connections to the endpoint. Acceptable values are between 1 and
          65535.

    :param outbound_nat_rules:
        An optional list of dictionaries representing valid OutboundNatRule objects. Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the outbound NAT rule
          object.
        - ``backend_address_pool``: Name of the backend address pool object used by the outbound NAT rule object.
          Outbound traffic is randomly load balanced across IPs in the backend IPs.
        - ``allocated_outbound_ports``: The number of outbound ports to be used for NAT.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure load balancer exists:
            azurearm_network.load_balancer_present:
                - name: lb1
                - resource_group: group1
                - location: eastus
                - frontend_ip_configurations:
                  - name: lb1_feip1
                    public_ip_address: pub_ip1
                - backend_address_pools:
                  - name: lb1_bepool1
                - probes:
                  - name: lb1_webprobe1
                    protocol: tcp
                    port: 80
                    interval_in_seconds: 5
                    number_of_probes: 2
                - load_balancing_rules:
                  - name: lb1_webprobe1
                    protocol: tcp
                    frontend_port: 80
                    backend_port: 80
                    idle_timeout_in_minutes: 4
                    frontend_ip_configuration: lb1_feip1
                    backend_address_pool: lb1_bepool1
                    probe: lb1_webprobe1
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
                  - azurearm_network: Ensure public IP exists
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    if sku:
        sku = {'name': sku.capitalize()}

    load_bal = __salt__['azurearm_network.load_balancer_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in load_bal:
        # The load balancer exists -- accumulate the differences from the desired state.
        tag_diff = __utils__['dictdiffer.deep_diff'](load_bal.get('tags', {}), tags or {})
        if tag_diff:
            ret['changes']['tags'] = tag_diff

        if sku:
            sku_diff = __utils__['dictdiffer.deep_diff'](load_bal.get('sku', {}), sku)
            if sku_diff:
                ret['changes']['sku'] = sku_diff

        # Each remaining parameter is a list of dicts; compare them all with
        # the shared helper. The third tuple element names the fields whose
        # Azure resource IDs must be converted to names before comparing.
        list_params = (
            ('frontend_ip_configurations', frontend_ip_configurations, ['public_ip_address', 'subnet']),
            ('backend_address_pools', backend_address_pools, None),
            ('probes', probes, None),
            ('load_balancing_rules', load_balancing_rules,
             ['frontend_ip_configuration', 'backend_address_pool', 'probe']),
            ('inbound_nat_rules', inbound_nat_rules, ['frontend_ip_configuration']),
            ('inbound_nat_pools', inbound_nat_pools, ['frontend_ip_configuration']),
            ('outbound_nat_rules', outbound_nat_rules, ['frontend_ip_configuration']),
        )

        for key, desired, id_fields in list_params:
            if not desired:
                continue
            if id_fields:
                comp_ret = __utils__['azurearm.compare_list_of_dicts'](load_bal.get(key, []), desired, id_fields)
            else:
                comp_ret = __utils__['azurearm.compare_list_of_dicts'](load_bal.get(key, []), desired)
            if comp_ret.get('comment'):
                ret['comment'] = '"{0}" {1}'.format(key, comp_ret['comment'])
                return ret
            if comp_ret.get('changes'):
                ret['changes'][key] = comp_ret['changes']

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Load balancer {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Load balancer {0} would be updated.'.format(name)
            return ret

    else:
        # Load balancer not found: everything requested is a change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'sku': sku,
                'tags': tags,
                'frontend_ip_configurations': frontend_ip_configurations,
                'backend_address_pools': backend_address_pools,
                'load_balancing_rules': load_balancing_rules,
                'probes': probes,
                'inbound_nat_rules': inbound_nat_rules,
                'inbound_nat_pools': inbound_nat_pools,
                'outbound_nat_rules': outbound_nat_rules,
            }
        }

        if __opts__['test']:
            ret['comment'] = 'Load balancer {0} would be created.'.format(name)
            ret['result'] = None
            return ret

    lb_kwargs = kwargs.copy()
    lb_kwargs.update(connection_auth)

    load_bal = __salt__['azurearm_network.load_balancer_create_or_update'](
        name=name,
        resource_group=resource_group,
        sku=sku,
        tags=tags,
        frontend_ip_configurations=frontend_ip_configurations,
        backend_address_pools=backend_address_pools,
        load_balancing_rules=load_balancing_rules,
        probes=probes,
        inbound_nat_rules=inbound_nat_rules,
        inbound_nat_pools=inbound_nat_pools,
        outbound_nat_rules=outbound_nat_rules,
        **lb_kwargs
    )

    if 'error' not in load_bal:
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create load balancer {0}! ({1})'.format(name, load_bal.get('error'))
    return ret
def load_balancer_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure the named load balancer is removed from the resource group.

    :param name:
        Name of the load balancer.

    :param resource_group:
        The resource group assigned to the load balancer.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.load_balancer_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' in existing:
        # Lookup failed, so the load balancer is already gone -- desired state holds.
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Load balancer {0} would be deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.load_balancer_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete load balancer {0}!'.format(name)
    return ret
def public_ip_address_present(name, resource_group, tags=None, sku=None, public_ip_allocation_method=None,
                              public_ip_address_version=None, dns_settings=None, idle_timeout_in_minutes=None,
                              connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a public IP address exists.

    :param name:
        Name of the public IP address.

    :param resource_group:
        The resource group assigned to the public IP address.

    :param dns_settings:
        An optional dictionary representing a valid PublicIPAddressDnsSettings object. Parameters include
        'domain_name_label' and 'reverse_fqdn', which accept strings. The 'domain_name_label' parameter is concatenated
        with the regionalized DNS zone make up the fully qualified domain name associated with the public IP address.
        If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS
        system. The 'reverse_fqdn' parameter is a user-visible, fully qualified domain name that resolves to this public
        IP address. If the reverse FQDN is specified, then a PTR DNS record is created pointing from the IP address in
        the in-addr.arpa domain to the reverse FQDN.

    :param sku:
        The public IP address SKU, which can be 'Basic' or 'Standard'.

    :param public_ip_allocation_method:
        The public IP allocation method. Possible values are: 'Static' and 'Dynamic'.

    :param public_ip_address_version:
        The public IP address version. Possible values are: 'IPv4' and 'IPv6'.

    :param idle_timeout_in_minutes:
        An integer representing the idle timeout of the public IP address.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the public IP address object.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure public IP exists:
            azurearm_network.public_ip_address_present:
                - name: pub_ip1
                - resource_group: group1
                - dns_settings:
                    domain_name_label: decisionlab-ext-test-label
                - sku: basic
                - public_ip_allocation_method: static
                - public_ip_address_version: ipv4
                - idle_timeout_in_minutes: 4
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    if sku:
        sku = {'name': sku.capitalize()}

    existing = __salt__['azurearm_network.public_ip_address_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in existing:
        # The address exists -- accumulate the differences from the desired state.
        tag_diff = __utils__['dictdiffer.deep_diff'](existing.get('tags', {}), tags or {})
        if tag_diff:
            ret['changes']['tags'] = tag_diff

        if dns_settings:
            if not isinstance(dns_settings, dict):
                ret['comment'] = 'DNS settings must be provided as a dictionary!'
                return ret
            current_dns = existing.get('dns_settings', {})
            # Only the keys named in the desired settings are compared.
            if any(dns_settings[key] != current_dns.get(key) for key in dns_settings):
                ret['changes']['dns_settings'] = {
                    'old': existing.get('dns_settings'),
                    'new': dns_settings
                }

        if sku:
            sku_diff = __utils__['dictdiffer.deep_diff'](existing.get('sku', {}), sku)
            if sku_diff:
                ret['changes']['sku'] = sku_diff

        if public_ip_allocation_method:
            if public_ip_allocation_method.capitalize() != existing.get('public_ip_allocation_method'):
                ret['changes']['public_ip_allocation_method'] = {
                    'old': existing.get('public_ip_allocation_method'),
                    'new': public_ip_allocation_method
                }

        if public_ip_address_version:
            if public_ip_address_version.lower() != existing.get('public_ip_address_version', '').lower():
                ret['changes']['public_ip_address_version'] = {
                    'old': existing.get('public_ip_address_version'),
                    'new': public_ip_address_version
                }

        if idle_timeout_in_minutes and (int(idle_timeout_in_minutes) != existing.get('idle_timeout_in_minutes')):
            ret['changes']['idle_timeout_in_minutes'] = {
                'old': existing.get('idle_timeout_in_minutes'),
                'new': idle_timeout_in_minutes
            }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Public IP address {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Public IP address {0} would be updated.'.format(name)
            return ret

    else:
        # Address not found: everything requested is a change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'tags': tags,
                'dns_settings': dns_settings,
                'sku': sku,
                'public_ip_allocation_method': public_ip_allocation_method,
                'public_ip_address_version': public_ip_address_version,
                'idle_timeout_in_minutes': idle_timeout_in_minutes,
            }
        }

        if __opts__['test']:
            ret['comment'] = 'Public IP address {0} would be created.'.format(name)
            ret['result'] = None
            return ret

    pub_ip_kwargs = kwargs.copy()
    pub_ip_kwargs.update(connection_auth)

    result = __salt__['azurearm_network.public_ip_address_create_or_update'](
        name=name,
        resource_group=resource_group,
        sku=sku,
        tags=tags,
        dns_settings=dns_settings,
        public_ip_allocation_method=public_ip_allocation_method,
        public_ip_address_version=public_ip_address_version,
        idle_timeout_in_minutes=idle_timeout_in_minutes,
        **pub_ip_kwargs
    )

    if 'error' not in result:
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create public IP address {0}! ({1})'.format(name, result.get('error'))
    return ret
def public_ip_address_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0
    Ensure a public IP address does not exist in the resource group.
    :param name:
        Name of the public IP address.
    :param resource_group:
        The resource group assigned to the public IP address.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Credentials are mandatory and must arrive as a dictionary.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.public_ip_address_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the address is already gone -- nothing to do.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} was not found.'.format(name)
        return ret

    # Dry run: report the pending deletion without calling Azure.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Public IP address {0} would be deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.public_ip_address_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete public IP address {0}!'.format(name)
    return ret
def network_interface_present(name, ip_configurations, subnet, virtual_network, resource_group, tags=None,
                              virtual_machine=None, network_security_group=None, dns_settings=None, mac_address=None,
                              primary=None, enable_accelerated_networking=None, enable_ip_forwarding=None,
                              connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0
    Ensure a network interface exists.
    :param name:
        Name of the network interface.
    :param ip_configurations:
        A list of dictionaries representing valid NetworkInterfaceIPConfiguration objects. The 'name' key is required at
        minimum. At least one IP Configuration must be present.
    :param subnet:
        Name of the existing subnet assigned to the network interface.
    :param virtual_network:
        Name of the existing virtual network containing the subnet.
    :param resource_group:
        The resource group assigned to the virtual network.
    :param tags:
        A dictionary of strings can be passed as tag metadata to the network interface object.
    :param network_security_group:
        The name of the existing network security group to assign to the network interface.
    :param virtual_machine:
        The name of the existing virtual machine to assign to the network interface.
    :param dns_settings:
        An optional dictionary representing a valid NetworkInterfaceDnsSettings object. Valid parameters are:
        - ``dns_servers``: List of DNS server IP addresses. Use 'AzureProvidedDNS' to switch to Azure provided DNS
          resolution. 'AzureProvidedDNS' value cannot be combined with other IPs, it must be the only value in
          dns_servers collection.
        - ``internal_dns_name_label``: Relative DNS name for this NIC used for internal communications between VMs in
          the same virtual network.
        - ``internal_fqdn``: Fully qualified DNS name supporting internal communications between VMs in the same virtual
          network.
        - ``internal_domain_name_suffix``: Even if internal_dns_name_label is not specified, a DNS entry is created for
          the primary NIC of the VM. This DNS name can be constructed by concatenating the VM name with the value of
          internal_domain_name_suffix.
    :param mac_address:
        Optional string containing the MAC address of the network interface.
    :param primary:
        Optional boolean allowing the interface to be set as the primary network interface on a virtual machine
        with multiple interfaces attached.
    :param enable_accelerated_networking:
        Optional boolean indicating whether accelerated networking should be enabled for the interface.
    :param enable_ip_forwarding:
        Optional boolean indicating whether IP forwarding should be enabled for the interface.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    Example usage:
    .. code-block:: yaml
        Ensure network interface exists:
            azurearm_network.network_interface_present:
                - name: iface1
                - subnet: vnet1_sn1
                - virtual_network: vnet1
                - resource_group: group1
                - ip_configurations:
                  - name: iface1_ipc1
                    public_ip_address: pub_ip2
                - dns_settings:
                    internal_dns_name_label: decisionlab-int-test-label
                - primary: True
                - enable_accelerated_networking: True
                - enable_ip_forwarding: False
                - network_security_group: nsg1
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure subnet exists
                  - azurearm_network: Ensure network security group exists
                  - azurearm_network: Ensure another public IP exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }
    # Credentials are mandatory and must arrive as a dictionary.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    iface = __salt__['azurearm_network.network_interface_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )
    # The interface already exists: diff each managed property and collect
    # the pending updates in ret['changes'].
    if 'error' not in iface:
        # tag changes
        tag_changes = __utils__['dictdiffer.deep_diff'](iface.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes
        # mac_address changes
        if mac_address and (mac_address != iface.get('mac_address')):
            ret['changes']['mac_address'] = {
                'old': iface.get('mac_address'),
                'new': mac_address
            }
        # primary changes (only diffed when explicitly requested)
        if primary is not None:
            if primary != iface.get('primary', True):
                ret['changes']['primary'] = {
                    'old': iface.get('primary'),
                    'new': primary
                }
        # enable_accelerated_networking changes
        if enable_accelerated_networking is not None:
            if enable_accelerated_networking != iface.get('enable_accelerated_networking'):
                ret['changes']['enable_accelerated_networking'] = {
                    'old': iface.get('enable_accelerated_networking'),
                    'new': enable_accelerated_networking
                }
        # enable_ip_forwarding changes
        if enable_ip_forwarding is not None:
            if enable_ip_forwarding != iface.get('enable_ip_forwarding'):
                ret['changes']['enable_ip_forwarding'] = {
                    'old': iface.get('enable_ip_forwarding'),
                    'new': enable_ip_forwarding
                }
        # network_security_group changes -- the API returns a full resource
        # ID, so extract the trailing name segment before comparing.
        nsg_name = None
        if iface.get('network_security_group'):
            nsg_name = iface['network_security_group']['id'].split('/')[-1]
        if network_security_group and (network_security_group != nsg_name):
            ret['changes']['network_security_group'] = {
                'old': nsg_name,
                'new': network_security_group
            }
        # virtual_machine changes (same resource-ID extraction as above)
        vm_name = None
        if iface.get('virtual_machine'):
            vm_name = iface['virtual_machine']['id'].split('/')[-1]
        if virtual_machine and (virtual_machine != vm_name):
            ret['changes']['virtual_machine'] = {
                'old': vm_name,
                'new': virtual_machine
            }
        # dns_settings changes
        if dns_settings:
            if not isinstance(dns_settings, dict):
                ret['comment'] = 'DNS settings must be provided as a dictionary!'
                return ret
            for key in dns_settings:
                new_val = dns_settings[key]
                old_val = iface.get('dns_settings', {}).get(key, '')
                # Only plain strings can be compared case-insensitively.
                # ``dns_servers`` is a list of IPs, and calling .lower() on
                # it (as the previous code did unconditionally) raised
                # AttributeError; non-string values fall back to equality.
                if isinstance(new_val, str) and isinstance(old_val, str):
                    differs = new_val.lower() != old_val.lower()
                else:
                    differs = new_val != old_val
                if differs:
                    ret['changes']['dns_settings'] = {
                        'old': iface.get('dns_settings'),
                        'new': dns_settings
                    }
                    break
        # ip_configurations changes
        comp_ret = __utils__['azurearm.compare_list_of_dicts'](
            iface.get('ip_configurations', []),
            ip_configurations,
            ['public_ip_address', 'subnet']
        )
        if comp_ret.get('comment'):
            ret['comment'] = '"ip_configurations" {0}'.format(comp_ret['comment'])
            return ret
        if comp_ret.get('changes'):
            ret['changes']['ip_configurations'] = comp_ret['changes']
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Network interface {0} is already present.'.format(name)
            return ret
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Network interface {0} would be updated.'.format(name)
            return ret
    else:
        # The interface does not exist yet: everything is a new change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'ip_configurations': ip_configurations,
                'dns_settings': dns_settings,
                'network_security_group': network_security_group,
                'virtual_machine': virtual_machine,
                'enable_accelerated_networking': enable_accelerated_networking,
                'enable_ip_forwarding': enable_ip_forwarding,
                'mac_address': mac_address,
                'primary': primary,
                'tags': tags,
            }
        }
        if __opts__['test']:
            ret['comment'] = 'Network interface {0} would be created.'.format(name)
            ret['result'] = None
            return ret
    # Apply the create-or-update; connection_auth is merged into kwargs so
    # the execution module can authenticate.
    iface_kwargs = kwargs.copy()
    iface_kwargs.update(connection_auth)
    iface = __salt__['azurearm_network.network_interface_create_or_update'](
        name=name,
        subnet=subnet,
        virtual_network=virtual_network,
        resource_group=resource_group,
        ip_configurations=ip_configurations,
        dns_settings=dns_settings,
        enable_accelerated_networking=enable_accelerated_networking,
        enable_ip_forwarding=enable_ip_forwarding,
        mac_address=mac_address,
        primary=primary,
        network_security_group=network_security_group,
        virtual_machine=virtual_machine,
        tags=tags,
        **iface_kwargs
    )
    if 'error' not in iface:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} has been created.'.format(name)
        return ret
    ret['comment'] = 'Failed to create network interface {0}! ({1})'.format(name, iface.get('error'))
    return ret
def network_interface_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0
    Ensure a network interface does not exist in the resource group.
    :param name:
        Name of the network interface.
    :param resource_group:
        The resource group assigned to the network interface.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }
    # Credentials are mandatory and must arrive as a dictionary.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    iface = __salt__['azurearm_network.network_interface_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )
    # A lookup error means the interface is already absent.
    if 'error' in iface:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} was not found.'.format(name)
        return ret
    elif __opts__['test']:
        # Dry run: report the pending deletion without calling Azure.
        ret['comment'] = 'Network interface {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {
            'old': iface,
            'new': {},
        }
        return ret
    deleted = __salt__['azurearm_network.network_interface_delete'](name, resource_group, **connection_auth)
    if deleted:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} has been deleted.'.format(name)
        ret['changes'] = {
            'old': iface,
            'new': {}
        }
        return ret
    # Fixed stray ')' at the end of this message so it matches the other
    # *_absent failure messages in this module.
    ret['comment'] = 'Failed to delete network interface {0}!'.format(name)
    return ret
def route_table_present(name, resource_group, tags=None, routes=None, disable_bgp_route_propagation=None,
                        connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0
    Ensure a route table exists.
    :param name:
        Name of the route table.
    :param resource_group:
        The resource group assigned to the route table.
    :param routes:
        An optional list of dictionaries representing valid Route objects contained within a route table. See the
        documentation for the route_present state or route_create_or_update execution module for more information on
        required and optional parameters for routes. The routes are only managed if this parameter is present. When this
        parameter is absent, implemented routes will not be removed, and will merely become unmanaged.
    :param disable_bgp_route_propagation:
        An optional boolean parameter setting whether to disable the routes learned by BGP on the route table.
    :param tags:
        A dictionary of strings can be passed as tag metadata to the route table object.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    Example usage:
    .. code-block:: yaml
        Ensure route table exists:
            azurearm_network.route_table_present:
                - name: rt1
                - resource_group: group1
                - routes:
                  - name: rt1_route1
                    address_prefix: '0.0.0.0/0'
                    next_hop_type: internet
                  - name: rt1_route2
                    address_prefix: '192.168.0.0/16'
                    next_hop_type: vnetlocal
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }
    # Credentials are mandatory and must arrive as a dictionary.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    rt_tbl = __salt__['azurearm_network.route_table_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )
    # Table already exists: diff each managed property into ret['changes'].
    if 'error' not in rt_tbl:
        # tag changes
        tag_changes = __utils__['dictdiffer.deep_diff'](rt_tbl.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes
        # disable_bgp_route_propagation changes
        # pylint: disable=line-too-long
        if disable_bgp_route_propagation and (disable_bgp_route_propagation != rt_tbl.get('disable_bgp_route_propagation')):
            ret['changes']['disable_bgp_route_propagation'] = {
                'old': rt_tbl.get('disable_bgp_route_propagation'),
                'new': disable_bgp_route_propagation
            }
        # routes changes -- only diffed when the caller manages routes here.
        if routes:
            comp_ret = __utils__['azurearm.compare_list_of_dicts'](rt_tbl.get('routes', []), routes)
            if comp_ret.get('comment'):
                # The comparison helper reported malformed input; surface it.
                ret['comment'] = '"routes" {0}'.format(comp_ret['comment'])
                return ret
            if comp_ret.get('changes'):
                ret['changes']['routes'] = comp_ret['changes']
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Route table {0} is already present.'.format(name)
            return ret
        if __opts__['test']:
            # Dry run: report the pending update without calling Azure.
            ret['result'] = None
            ret['comment'] = 'Route table {0} would be updated.'.format(name)
            return ret
    else:
        # Table does not exist yet: everything is a new change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'tags': tags,
                'routes': routes,
                'disable_bgp_route_propagation': disable_bgp_route_propagation,
            }
        }
        if __opts__['test']:
            ret['comment'] = 'Route table {0} would be created.'.format(name)
            ret['result'] = None
            return ret
    # Merge connection_auth into kwargs so the execution module can
    # authenticate, then create or update the table.
    rt_tbl_kwargs = kwargs.copy()
    rt_tbl_kwargs.update(connection_auth)
    rt_tbl = __salt__['azurearm_network.route_table_create_or_update'](
        name=name,
        resource_group=resource_group,
        disable_bgp_route_propagation=disable_bgp_route_propagation,
        routes=routes,
        tags=tags,
        **rt_tbl_kwargs
    )
    if 'error' not in rt_tbl:
        ret['result'] = True
        ret['comment'] = 'Route table {0} has been created.'.format(name)
        return ret
    ret['comment'] = 'Failed to create route table {0}! ({1})'.format(name, rt_tbl.get('error'))
    return ret
def route_table_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0
    Ensure a route table does not exist in the resource group.
    :param name:
        Name of the route table.
    :param resource_group:
        The resource group assigned to the route table.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Credentials are mandatory and must arrive as a dictionary.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.route_table_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the table is already gone -- nothing to do.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Route table {0} was not found.'.format(name)
        return ret

    # Dry run: report the pending deletion without calling Azure.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Route table {0} would be deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.route_table_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Route table {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete route table {0}!'.format(name)
    return ret
def route_present(name, address_prefix, next_hop_type, route_table, resource_group, next_hop_ip_address=None,
                  connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0
    Ensure a route exists within a route table.
    :param name:
        Name of the route.
    :param address_prefix:
        The destination CIDR to which the route applies.
    :param next_hop_type:
        The type of Azure hop the packet should be sent to. Possible values are: 'VirtualNetworkGateway', 'VnetLocal',
        'Internet', 'VirtualAppliance', and 'None'.
    :param next_hop_ip_address:
        The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop
        type is 'VirtualAppliance'.
    :param route_table:
        The name of the existing route table which will contain the route.
    :param resource_group:
        The resource group assigned to the route table.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    Example usage:
    .. code-block:: yaml
        Ensure route exists:
            azurearm_network.route_present:
                - name: rt1_route2
                - route_table: rt1
                - resource_group: group1
                - address_prefix: '192.168.0.0/16'
                - next_hop_type: vnetlocal
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure route table exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }
    # Credentials are mandatory and must arrive as a dictionary.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    route = __salt__['azurearm_network.route_get'](
        name,
        route_table,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )
    # Route already exists: diff each managed property into ret['changes'].
    if 'error' not in route:
        if address_prefix != route.get('address_prefix'):
            ret['changes']['address_prefix'] = {
                'old': route.get('address_prefix'),
                'new': address_prefix
            }
        # Hop type is compared case-insensitively since Azure normalizes it.
        if next_hop_type.lower() != route.get('next_hop_type', '').lower():
            ret['changes']['next_hop_type'] = {
                'old': route.get('next_hop_type'),
                'new': next_hop_type
            }
        # The next-hop IP is only meaningful for VirtualAppliance routes.
        if next_hop_type.lower() == 'virtualappliance' and next_hop_ip_address != route.get('next_hop_ip_address'):
            ret['changes']['next_hop_ip_address'] = {
                'old': route.get('next_hop_ip_address'),
                'new': next_hop_ip_address
            }
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Route {0} is already present.'.format(name)
            return ret
        if __opts__['test']:
            # Dry run: report the pending update without calling Azure.
            ret['result'] = None
            ret['comment'] = 'Route {0} would be updated.'.format(name)
            return ret
    else:
        # Route does not exist yet: everything is a new change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'address_prefix': address_prefix,
                'next_hop_type': next_hop_type,
                'next_hop_ip_address': next_hop_ip_address
            }
        }
        if __opts__['test']:
            ret['comment'] = 'Route {0} would be created.'.format(name)
            ret['result'] = None
            return ret
    # Merge connection_auth into kwargs so the execution module can
    # authenticate, then create or update the route.
    route_kwargs = kwargs.copy()
    route_kwargs.update(connection_auth)
    route = __salt__['azurearm_network.route_create_or_update'](
        name=name,
        route_table=route_table,
        resource_group=resource_group,
        address_prefix=address_prefix,
        next_hop_type=next_hop_type,
        next_hop_ip_address=next_hop_ip_address,
        **route_kwargs
    )
    if 'error' not in route:
        ret['result'] = True
        ret['comment'] = 'Route {0} has been created.'.format(name)
        return ret
    ret['comment'] = 'Failed to create route {0}! ({1})'.format(name, route.get('error'))
    return ret
def route_absent(name, route_table, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0
    Ensure a route does not exist in the route table.
    :param name:
        Name of the route.
    :param route_table:
        The name of the existing route table containing the route.
    :param resource_group:
        The resource group assigned to the route table.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }
    # Credentials are mandatory and must arrive as a dictionary.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    route = __salt__['azurearm_network.route_get'](
        name,
        route_table,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )
    # A lookup error means the route is already absent.
    if 'error' in route:
        ret['result'] = True
        ret['comment'] = 'Route {0} was not found.'.format(name)
        return ret
    elif __opts__['test']:
        # Dry run: report the pending deletion without calling Azure.
        ret['comment'] = 'Route {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {
            'old': route,
            'new': {},
        }
        return ret
    deleted = __salt__['azurearm_network.route_delete'](name, route_table, resource_group, **connection_auth)
    if deleted:
        ret['result'] = True
        ret['comment'] = 'Route {0} has been deleted.'.format(name)
        ret['changes'] = {
            'old': route,
            'new': {}
        }
        return ret
    ret['comment'] = 'Failed to delete route {0}!'.format(name)
    return ret
|
saltstack/salt
|
salt/states/azurearm_network.py
|
subnet_absent
|
python
|
def subnet_absent(name, virtual_network, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0
    Ensure a subnet does not exist in the virtual network.
    :param name:
        Name of the subnet.
    :param virtual_network:
        Name of the existing virtual network containing the subnet.
    :param resource_group:
        The resource group assigned to the virtual network.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }
    # Credentials are mandatory and must arrive as a dictionary.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    snet = __salt__['azurearm_network.subnet_get'](
        name,
        virtual_network,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )
    # A lookup error means the subnet is already absent.
    if 'error' in snet:
        ret['result'] = True
        ret['comment'] = 'Subnet {0} was not found.'.format(name)
        return ret
    elif __opts__['test']:
        # Dry run: report the pending deletion without calling Azure.
        ret['comment'] = 'Subnet {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {
            'old': snet,
            'new': {},
        }
        return ret
    deleted = __salt__['azurearm_network.subnet_delete'](name, virtual_network, resource_group, **connection_auth)
    if deleted:
        ret['result'] = True
        ret['comment'] = 'Subnet {0} has been deleted.'.format(name)
        ret['changes'] = {
            'old': snet,
            'new': {}
        }
        return ret
    ret['comment'] = 'Failed to delete subnet {0}!'.format(name)
    return ret
|
.. versionadded:: 2019.2.0
Ensure a subnet does not exist in the virtual network.
:param name:
Name of the subnet.
:param virtual_network:
Name of the existing virtual network containing the subnet.
:param resource_group:
The resource group assigned to the virtual network.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/azurearm_network.py#L466-L530
| null |
# -*- coding: utf-8 -*-
'''
Azure (ARM) Network State Module
.. versionadded:: 2019.2.0
:maintainer: <devops@decisionlab.io>
:maturity: new
:depends:
* `azure <https://pypi.python.org/pypi/azure>`_ >= 2.0.0
* `azure-common <https://pypi.python.org/pypi/azure-common>`_ >= 1.1.8
* `azure-mgmt <https://pypi.python.org/pypi/azure-mgmt>`_ >= 1.0.0
* `azure-mgmt-compute <https://pypi.python.org/pypi/azure-mgmt-compute>`_ >= 1.0.0
* `azure-mgmt-network <https://pypi.python.org/pypi/azure-mgmt-network>`_ >= 1.7.1
* `azure-mgmt-resource <https://pypi.python.org/pypi/azure-mgmt-resource>`_ >= 1.1.0
* `azure-mgmt-storage <https://pypi.python.org/pypi/azure-mgmt-storage>`_ >= 1.0.0
* `azure-mgmt-web <https://pypi.python.org/pypi/azure-mgmt-web>`_ >= 0.32.0
* `azure-storage <https://pypi.python.org/pypi/azure-storage>`_ >= 0.34.3
* `msrestazure <https://pypi.python.org/pypi/msrestazure>`_ >= 0.4.21
:platform: linux
:configuration: This module requires Azure Resource Manager credentials to be passed as a dictionary of
keyword arguments to the ``connection_auth`` parameter in order to work properly. Since the authentication
parameters are sensitive, it's recommended to pass them to the states via pillar.
Required provider parameters:
if using username and password:
* ``subscription_id``
* ``username``
* ``password``
if using a service principal:
* ``subscription_id``
* ``tenant``
* ``client_id``
* ``secret``
Optional provider parameters:
**cloud_environment**: Used to point the cloud driver to different API endpoints, such as Azure GovCloud. Possible values:
* ``AZURE_PUBLIC_CLOUD`` (default)
* ``AZURE_CHINA_CLOUD``
* ``AZURE_US_GOV_CLOUD``
* ``AZURE_GERMAN_CLOUD``
Example Pillar for Azure Resource Manager authentication:
.. code-block:: yaml
azurearm:
user_pass_auth:
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
username: fletch
password: 123pass
mysubscription:
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
tenant: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
client_id: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
secret: XXXXXXXXXXXXXXXXXXXXXXXX
cloud_environment: AZURE_PUBLIC_CLOUD
Example states using Azure Resource Manager authentication:
.. code-block:: jinja
{% set profile = salt['pillar.get']('azurearm:mysubscription') %}
Ensure virtual network exists:
azurearm_network.virtual_network_present:
- name: my_vnet
- resource_group: my_rg
- address_prefixes:
- '10.0.0.0/8'
- '192.168.0.0/16'
- dns_servers:
- '8.8.8.8'
- tags:
how_awesome: very
contact_name: Elmer Fudd Gantry
- connection_auth: {{ profile }}
Ensure virtual network is absent:
azurearm_network.virtual_network_absent:
- name: other_vnet
- resource_group: my_rg
- connection_auth: {{ profile }}
'''
# Python libs
from __future__ import absolute_import
import logging
# Salt libs
try:
from salt.ext.six.moves import range as six_range
except ImportError:
six_range = range
__virtualname__ = 'azurearm_network'
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only make this state available if the azurearm_network module is available.
    '''
    # Load only when the backing execution module was loaded successfully.
    if 'azurearm_network.check_ip_address_availability' in __salt__:
        return __virtualname__
    return False
def virtual_network_present(name, address_prefixes, resource_group, dns_servers=None,
                            tags=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0
    Ensure a virtual network exists.
    :param name:
        Name of the virtual network.
    :param resource_group:
        The resource group assigned to the virtual network.
    :param address_prefixes:
        A list of CIDR blocks which can be used by subnets within the virtual network.
    :param dns_servers:
        A list of DNS server addresses.
    :param tags:
        A dictionary of strings can be passed as tag metadata to the virtual network object.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    Example usage:
    .. code-block:: yaml
        Ensure virtual network exists:
            azurearm_network.virtual_network_present:
                - name: vnet1
                - resource_group: group1
                - address_prefixes:
                    - '10.0.0.0/8'
                    - '192.168.0.0/16'
                - dns_servers:
                    - '8.8.8.8'
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }
    # Credentials are mandatory and must arrive as a dictionary.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    vnet = __salt__['azurearm_network.virtual_network_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )
    # The network already exists: diff each managed property into
    # ret['changes'].
    if 'error' not in vnet:
        tag_changes = __utils__['dictdiffer.deep_diff'](vnet.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes
        # Order-insensitive comparison of the DNS server lists.
        dns_changes = set(dns_servers or []).symmetric_difference(
            set(vnet.get('dhcp_options', {}).get('dns_servers', [])))
        if dns_changes:
            ret['changes']['dns_servers'] = {
                'old': vnet.get('dhcp_options', {}).get('dns_servers', []),
                'new': dns_servers,
            }
        # Order-insensitive comparison of the address prefix lists.
        addr_changes = set(address_prefixes or []).symmetric_difference(
            set(vnet.get('address_space', {}).get('address_prefixes', [])))
        if addr_changes:
            ret['changes']['address_space'] = {
                'address_prefixes': {
                    'old': vnet.get('address_space', {}).get('address_prefixes', []),
                    'new': address_prefixes,
                }
            }
        # Optional protection flags arrive via kwargs and default to False.
        if kwargs.get('enable_ddos_protection', False) != vnet.get('enable_ddos_protection'):
            ret['changes']['enable_ddos_protection'] = {
                'old': vnet.get('enable_ddos_protection'),
                'new': kwargs.get('enable_ddos_protection')
            }
        if kwargs.get('enable_vm_protection', False) != vnet.get('enable_vm_protection'):
            ret['changes']['enable_vm_protection'] = {
                'old': vnet.get('enable_vm_protection'),
                'new': kwargs.get('enable_vm_protection')
            }
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Virtual network {0} is already present.'.format(name)
            return ret
        if __opts__['test']:
            # Dry run: report the pending update without calling Azure.
            ret['result'] = None
            ret['comment'] = 'Virtual network {0} would be updated.'.format(name)
            return ret
    else:
        # The network does not exist yet: everything is a new change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'resource_group': resource_group,
                'address_space': {'address_prefixes': address_prefixes},
                'dhcp_options': {'dns_servers': dns_servers},
                'enable_ddos_protection': kwargs.get('enable_ddos_protection', False),
                'enable_vm_protection': kwargs.get('enable_vm_protection', False),
                'tags': tags,
            }
        }
        if __opts__['test']:
            ret['comment'] = 'Virtual network {0} would be created.'.format(name)
            ret['result'] = None
            return ret
    # Merge connection_auth into kwargs so the execution module can
    # authenticate, then create or update the network.
    vnet_kwargs = kwargs.copy()
    vnet_kwargs.update(connection_auth)
    vnet = __salt__['azurearm_network.virtual_network_create_or_update'](
        name=name,
        resource_group=resource_group,
        address_prefixes=address_prefixes,
        dns_servers=dns_servers,
        tags=tags,
        **vnet_kwargs
    )
    if 'error' not in vnet:
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} has been created.'.format(name)
        return ret
    ret['comment'] = 'Failed to create virtual network {0}! ({1})'.format(name, vnet.get('error'))
    return ret
def virtual_network_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0
    Ensure a virtual network does not exist in the resource group.
    :param name:
        Name of the virtual network.
    :param resource_group:
        The resource group assigned to the virtual network.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Credentials are mandatory and must arrive as a dictionary.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.virtual_network_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the network is already gone -- nothing to do.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} was not found.'.format(name)
        return ret

    # Dry run: report the pending deletion without calling Azure.
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Virtual network {0} would be deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.virtual_network_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete virtual network {0}!'.format(name)
    return ret
def subnet_present(name, address_prefix, virtual_network, resource_group,
                   security_group=None, route_table=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a subnet exists.

    :param name:
        Name of the subnet.

    :param address_prefix:
        A CIDR block used by the subnet within the virtual network.

    :param virtual_network:
        Name of the existing virtual network to contain the subnet.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param security_group:
        The name of the existing network security group to assign to the subnet.

    :param route_table:
        The name of the existing route table to assign to the subnet.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure subnet exists:
            azurearm_network.subnet_present:
                - name: vnet1_sn1
                - virtual_network: vnet1
                - resource_group: group1
                - address_prefix: '192.168.1.0/24'
                - security_group: nsg1
                - route_table: rt1
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure virtual network exists
                  - azurearm_network: Ensure network security group exists
                  - azurearm_network: Ensure route table exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.subnet_get'](
        name,
        virtual_network,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in existing:
        # Compare the CIDR block assigned to the subnet.
        if address_prefix != existing.get('address_prefix'):
            ret['changes']['address_prefix'] = {
                'old': existing.get('address_prefix'),
                'new': address_prefix
            }

        # Attached resources are referenced by full ID; compare only the
        # trailing name component against the desired name.
        current_nsg = None
        if existing.get('network_security_group'):
            current_nsg = existing['network_security_group']['id'].split('/')[-1]
        if security_group and (security_group != current_nsg):
            ret['changes']['network_security_group'] = {
                'old': current_nsg,
                'new': security_group
            }

        current_rt = None
        if existing.get('route_table'):
            current_rt = existing['route_table']['id'].split('/')[-1]
        if route_table and (route_table != current_rt):
            ret['changes']['route_table'] = {
                'old': current_rt,
                'new': route_table
            }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Subnet {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Subnet {0} would be updated.'.format(name)
            return ret
    else:
        # The subnet does not exist yet, so everything is a new change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'address_prefix': address_prefix,
                'network_security_group': security_group,
                'route_table': route_table
            }
        }

        if __opts__['test']:
            ret['comment'] = 'Subnet {0} would be created.'.format(name)
            ret['result'] = None
            return ret

    snet_kwargs = kwargs.copy()
    snet_kwargs.update(connection_auth)

    result = __salt__['azurearm_network.subnet_create_or_update'](
        name=name,
        virtual_network=virtual_network,
        resource_group=resource_group,
        address_prefix=address_prefix,
        network_security_group=security_group,
        route_table=route_table,
        **snet_kwargs
    )

    if 'error' not in result:
        ret['result'] = True
        ret['comment'] = 'Subnet {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create subnet {0}! ({1})'.format(name, result.get('error'))
    return ret
def network_security_group_present(name, resource_group, tags=None, security_rules=None, connection_auth=None,
                                   **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network security group exists.

    :param name:
        Name of the network security group.

    :param resource_group:
        The resource group assigned to the network security group.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the network security group object.

    :param security_rules: An optional list of dictionaries representing valid SecurityRule objects. See the
        documentation for the security_rule_present state or security_rule_create_or_update execution module
        for more information on required and optional parameters for security rules. The rules are only
        managed if this parameter is present. When this parameter is absent, implemented rules will not be removed,
        and will merely become unmanaged.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure network security group exists:
            azurearm_network.network_security_group_present:
                - name: nsg1
                - resource_group: group1
                - security_rules:
                  - name: nsg1_rule1
                    priority: 100
                    protocol: tcp
                    access: allow
                    direction: outbound
                    source_address_prefix: virtualnetwork
                    destination_address_prefix: internet
                    source_port_range: '*'
                    destination_port_range: '*'
                  - name: nsg1_rule2
                    priority: 101
                    protocol: tcp
                    access: allow
                    direction: inbound
                    source_address_prefix: internet
                    destination_address_prefix: virtualnetwork
                    source_port_range: '*'
                    destination_port_ranges:
                      - '80'
                      - '443'
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.network_security_group_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in existing:
        # Diff the tag metadata against the desired state.
        tag_diff = __utils__['dictdiffer.deep_diff'](existing.get('tags', {}), tags or {})
        if tag_diff:
            ret['changes']['tags'] = tag_diff

        # Security rules are only evaluated when explicitly managed.
        if security_rules:
            rule_cmp = __utils__['azurearm.compare_list_of_dicts'](existing.get('security_rules', []), security_rules)
            if rule_cmp.get('comment'):
                ret['comment'] = '"security_rules" {0}'.format(rule_cmp['comment'])
                return ret
            if rule_cmp.get('changes'):
                ret['changes']['security_rules'] = rule_cmp['changes']

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Network security group {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Network security group {0} would be updated.'.format(name)
            return ret
    else:
        # The group does not exist yet, so everything is a new change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'resource_group': resource_group,
                'tags': tags,
                'security_rules': security_rules,
            }
        }

        if __opts__['test']:
            ret['comment'] = 'Network security group {0} would be created.'.format(name)
            ret['result'] = None
            return ret

    nsg_kwargs = kwargs.copy()
    nsg_kwargs.update(connection_auth)

    result = __salt__['azurearm_network.network_security_group_create_or_update'](
        name=name,
        resource_group=resource_group,
        tags=tags,
        security_rules=security_rules,
        **nsg_kwargs
    )

    if 'error' not in result:
        ret['result'] = True
        ret['comment'] = 'Network security group {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create network security group {0}! ({1})'.format(name, result.get('error'))
    return ret
def network_security_group_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network security group does not exist in the resource group.

    :param name:
        Name of the network security group.

    :param resource_group:
        The resource group assigned to the network security group.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    # Connection parameters are mandatory for any API interaction.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.network_security_group_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the group is already absent.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Network security group {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'Network security group {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {
            'old': existing,
            'new': {},
        }
        return ret

    if __salt__['azurearm_network.network_security_group_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Network security group {0} has been deleted.'.format(name)
        ret['changes'] = {
            'old': existing,
            'new': {}
        }
        return ret

    ret['comment'] = 'Failed to delete network security group {0}!'.format(name)
    return ret
def security_rule_present(name, access, direction, priority, protocol, security_group, resource_group,
                          destination_address_prefix=None, destination_port_range=None, source_address_prefix=None,
                          source_port_range=None, description=None, destination_address_prefixes=None,
                          destination_port_ranges=None, source_address_prefixes=None, source_port_ranges=None,
                          connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a security rule exists.

    :param name:
        Name of the security rule.

    :param access:
        'allow' or 'deny'

    :param direction:
        'inbound' or 'outbound'

    :param priority:
        Integer between 100 and 4096 used for ordering rule application.

    :param protocol:
        'tcp', 'udp', or '*'

    :param security_group:
        The name of the existing network security group to contain the security rule.

    :param resource_group:
        The resource group assigned to the network security group.

    :param description:
        Optional description of the security rule.

    :param destination_address_prefix:
        The CIDR or destination IP range. Asterisk '*' can also be used to match all destination IPs.
        Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
        If this is an ingress rule, specifies where network traffic originates from.

    :param destination_port_range:
        The destination port or range. Integer or range between 0 and 65535. Asterisk '*'
        can also be used to match all ports.

    :param source_address_prefix:
        The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs.
        Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
        If this is an ingress rule, specifies where network traffic originates from.

    :param source_port_range:
        The source port or range. Integer or range between 0 and 65535. Asterisk '*'
        can also be used to match all ports.

    :param destination_address_prefixes:
        A list of destination_address_prefix values. This parameter overrides destination_address_prefix
        and will cause any value entered there to be ignored.

    :param destination_port_ranges:
        A list of destination_port_range values. This parameter overrides destination_port_range
        and will cause any value entered there to be ignored.

    :param source_address_prefixes:
        A list of source_address_prefix values. This parameter overrides source_address_prefix
        and will cause any value entered there to be ignored.

    :param source_port_ranges:
        A list of source_port_range values. This parameter overrides source_port_range
        and will cause any value entered there to be ignored.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure security rule exists:
            azurearm_network.security_rule_present:
                - name: nsg1_rule2
                - security_group: nsg1
                - resource_group: group1
                - priority: 101
                - protocol: tcp
                - access: allow
                - direction: inbound
                - source_address_prefix: internet
                - destination_address_prefix: virtualnetwork
                - source_port_range: '*'
                - destination_port_ranges:
                  - '80'
                  - '443'
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure network security group exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Validate the mutually-exclusive plural/singular parameter pairs: at
    # least one of each pair must be provided and a plural value must be a
    # list. A provided plural overrides its singular counterpart, as
    # documented above. The previous implementation nulled the singular via
    # exec(), which is a no-op on function locals in Python 3 and therefore
    # silently passed the singular value through -- the pairs are handled
    # explicitly here instead.
    exclusive_params = [
        ('source_port_ranges', source_port_ranges, 'source_port_range', source_port_range),
        ('source_address_prefixes', source_address_prefixes, 'source_address_prefix', source_address_prefix),
        ('destination_port_ranges', destination_port_ranges, 'destination_port_range', destination_port_range),
        ('destination_address_prefixes', destination_address_prefixes,
         'destination_address_prefix', destination_address_prefix),
    ]
    for plural_name, plural_value, singular_name, singular_value in exclusive_params:
        if not plural_value and not singular_value:
            ret['comment'] = 'Either the {0} or {1} parameter must be provided!'.format(plural_name, singular_name)
            return ret
        if plural_value and not isinstance(plural_value, list):
            ret['comment'] = 'The {0} parameter must be a list!'.format(plural_name)
            return ret

    # Discard singular values that are overridden by their plural counterparts
    # so they are neither compared nor sent to the execution module.
    if source_port_ranges:
        source_port_range = None
    if source_address_prefixes:
        source_address_prefix = None
    if destination_port_ranges:
        destination_port_range = None
    if destination_address_prefixes:
        destination_address_prefix = None

    rule = __salt__['azurearm_network.security_rule_get'](
        name,
        security_group,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in rule:
        # access changes (the API returns capitalized enum values)
        if access.capitalize() != rule.get('access'):
            ret['changes']['access'] = {
                'old': rule.get('access'),
                'new': access
            }

        # description changes
        if description != rule.get('description'):
            ret['changes']['description'] = {
                'old': rule.get('description'),
                'new': description
            }

        # direction changes (the API returns capitalized enum values)
        if direction.capitalize() != rule.get('direction'):
            ret['changes']['direction'] = {
                'old': rule.get('direction'),
                'new': direction
            }

        # priority changes
        if int(priority) != rule.get('priority'):
            ret['changes']['priority'] = {
                'old': rule.get('priority'),
                'new': priority
            }

        # protocol changes (case-insensitive; guard against a None value)
        if protocol.lower() != (rule.get('protocol') or '').lower():
            ret['changes']['protocol'] = {
                'old': rule.get('protocol'),
                'new': protocol
            }

        # destination_port_range changes
        if destination_port_range != rule.get('destination_port_range'):
            ret['changes']['destination_port_range'] = {
                'old': rule.get('destination_port_range'),
                'new': destination_port_range
            }

        # source_port_range changes
        if source_port_range != rule.get('source_port_range'):
            ret['changes']['source_port_range'] = {
                'old': rule.get('source_port_range'),
                'new': source_port_range
            }

        # destination_port_ranges changes (order-insensitive)
        if sorted(destination_port_ranges or []) != sorted(rule.get('destination_port_ranges', [])):
            ret['changes']['destination_port_ranges'] = {
                'old': rule.get('destination_port_ranges'),
                'new': destination_port_ranges
            }

        # source_port_ranges changes (order-insensitive)
        if sorted(source_port_ranges or []) != sorted(rule.get('source_port_ranges', [])):
            ret['changes']['source_port_ranges'] = {
                'old': rule.get('source_port_ranges'),
                'new': source_port_ranges
            }

        # destination_address_prefix changes (case-insensitive; guard None)
        if (destination_address_prefix or '').lower() != (rule.get('destination_address_prefix') or '').lower():
            ret['changes']['destination_address_prefix'] = {
                'old': rule.get('destination_address_prefix'),
                'new': destination_address_prefix
            }

        # source_address_prefix changes (case-insensitive; guard None)
        if (source_address_prefix or '').lower() != (rule.get('source_address_prefix') or '').lower():
            ret['changes']['source_address_prefix'] = {
                'old': rule.get('source_address_prefix'),
                'new': source_address_prefix
            }

        # destination_address_prefixes changes: compare order-insensitively
        # and case-insensitively, element by element, once lengths match.
        if sorted(destination_address_prefixes or []) != sorted(rule.get('destination_address_prefixes', [])):
            if len(destination_address_prefixes or []) != len(rule.get('destination_address_prefixes', [])):
                ret['changes']['destination_address_prefixes'] = {
                    'old': rule.get('destination_address_prefixes'),
                    'new': destination_address_prefixes
                }
            else:
                local_dst_addrs, remote_dst_addrs = (sorted(destination_address_prefixes),
                                                     sorted(rule.get('destination_address_prefixes')))
                for idx in six_range(0, len(local_dst_addrs)):
                    if local_dst_addrs[idx].lower() != remote_dst_addrs[idx].lower():
                        ret['changes']['destination_address_prefixes'] = {
                            'old': rule.get('destination_address_prefixes'),
                            'new': destination_address_prefixes
                        }
                        break

        # source_address_prefixes changes: same comparison strategy.
        if sorted(source_address_prefixes or []) != sorted(rule.get('source_address_prefixes', [])):
            if len(source_address_prefixes or []) != len(rule.get('source_address_prefixes', [])):
                ret['changes']['source_address_prefixes'] = {
                    'old': rule.get('source_address_prefixes'),
                    'new': source_address_prefixes
                }
            else:
                local_src_addrs, remote_src_addrs = (sorted(source_address_prefixes),
                                                     sorted(rule.get('source_address_prefixes')))
                for idx in six_range(0, len(local_src_addrs)):
                    if local_src_addrs[idx].lower() != remote_src_addrs[idx].lower():
                        ret['changes']['source_address_prefixes'] = {
                            'old': rule.get('source_address_prefixes'),
                            'new': source_address_prefixes
                        }
                        break

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Security rule {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Security rule {0} would be updated.'.format(name)
            return ret
    else:
        # The rule does not exist yet, so everything is a new change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'access': access,
                'description': description,
                'direction': direction,
                'priority': priority,
                'protocol': protocol,
                'destination_address_prefix': destination_address_prefix,
                'destination_address_prefixes': destination_address_prefixes,
                'destination_port_range': destination_port_range,
                'destination_port_ranges': destination_port_ranges,
                'source_address_prefix': source_address_prefix,
                'source_address_prefixes': source_address_prefixes,
                'source_port_range': source_port_range,
                'source_port_ranges': source_port_ranges,
            }
        }

        if __opts__['test']:
            ret['comment'] = 'Security rule {0} would be created.'.format(name)
            ret['result'] = None
            return ret

    rule_kwargs = kwargs.copy()
    rule_kwargs.update(connection_auth)

    rule = __salt__['azurearm_network.security_rule_create_or_update'](
        name=name,
        access=access,
        description=description,
        direction=direction,
        priority=priority,
        protocol=protocol,
        security_group=security_group,
        resource_group=resource_group,
        destination_address_prefix=destination_address_prefix,
        destination_address_prefixes=destination_address_prefixes,
        destination_port_range=destination_port_range,
        destination_port_ranges=destination_port_ranges,
        source_address_prefix=source_address_prefix,
        source_address_prefixes=source_address_prefixes,
        source_port_range=source_port_range,
        source_port_ranges=source_port_ranges,
        **rule_kwargs
    )

    if 'error' not in rule:
        ret['result'] = True
        ret['comment'] = 'Security rule {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create security rule {0}! ({1})'.format(name, rule.get('error'))
    return ret
def security_rule_absent(name, security_group, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a security rule does not exist in the network security group.

    :param name:
        Name of the security rule.

    :param security_group:
        The network security group containing the security rule.

    :param resource_group:
        The resource group assigned to the network security group.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    # Connection parameters are mandatory for any API interaction.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.security_rule_get'](
        name,
        security_group,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the rule is already absent.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Security rule {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'Security rule {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {
            'old': existing,
            'new': {},
        }
        return ret

    if __salt__['azurearm_network.security_rule_delete'](name, security_group, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Security rule {0} has been deleted.'.format(name)
        ret['changes'] = {
            'old': existing,
            'new': {}
        }
        return ret

    ret['comment'] = 'Failed to delete security rule {0}!'.format(name)
    return ret
def load_balancer_present(name, resource_group, sku=None, frontend_ip_configurations=None, backend_address_pools=None,
                          load_balancing_rules=None, probes=None, inbound_nat_rules=None, inbound_nat_pools=None,
                          outbound_nat_rules=None, tags=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a load balancer exists.

    :param name:
        Name of the load balancer.

    :param resource_group:
        The resource group assigned to the load balancer.

    :param sku:
        The load balancer SKU, which can be 'Basic' or 'Standard'.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the load balancer object.

    :param frontend_ip_configurations:
        An optional list of dictionaries representing valid FrontendIPConfiguration objects. A frontend IP
        configuration can be either private (using private IP address and subnet parameters) or public (using a
        reference to a public IP address object). Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``private_ip_address``: The private IP address of the IP configuration. Required if
          'private_ip_allocation_method' is 'Static'.
        - ``private_ip_allocation_method``: The Private IP allocation method. Possible values are: 'Static' and
          'Dynamic'.
        - ``subnet``: Name of an existing subnet inside of which the frontend IP will reside.
        - ``public_ip_address``: Name of an existing public IP address which will be assigned to the frontend IP
          object.

    :param backend_address_pools:
        An optional list of dictionaries representing valid BackendAddressPool objects. Only the 'name' parameter is
        valid for a BackendAddressPool dictionary. All other parameters are read-only references from other objects
        linking to the backend address pool. Inbound traffic is randomly load balanced across IPs in the backend IPs.

    :param probes:
        An optional list of dictionaries representing valid Probe objects. Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``protocol``: The protocol of the endpoint. Possible values are 'Http' or 'Tcp'. If 'Tcp' is specified, a
          received ACK is required for the probe to be successful. If 'Http' is specified, a 200 OK response from the
          specified URI is required for the probe to be successful.
        - ``port``: The port for communicating the probe. Possible values range from 1 to 65535, inclusive.
        - ``interval_in_seconds``: The interval, in seconds, for how frequently to probe the endpoint for health
          status. Typically, the interval is slightly less than half the allocated timeout period (in seconds) which
          allows two full probes before taking the instance out of rotation. The default value is 15, the minimum
          value is 5.
        - ``number_of_probes``: The number of probes where if no response, will result in stopping further traffic
          from being delivered to the endpoint. This values allows endpoints to be taken out of rotation faster or
          slower than the typical times used in Azure.
        - ``request_path``: The URI used for requesting health status from the VM. Path is required if a protocol is
          set to 'Http'. Otherwise, it is not allowed. There is no default value.

    :param load_balancing_rules:
        An optional list of dictionaries representing valid LoadBalancingRule objects. Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``load_distribution``: The load distribution policy for this rule. Possible values are 'Default',
          'SourceIP', and 'SourceIPProtocol'.
        - ``frontend_port``: The port for the external endpoint. Port numbers for each rule must be unique within the
          Load Balancer. Acceptable values are between 0 and 65534. Note that value 0 enables 'Any Port'.
        - ``backend_port``: The port used for internal connections on the endpoint. Acceptable values are between 0
          and 65535. Note that value 0 enables 'Any Port'.
        - ``idle_timeout_in_minutes``: The timeout for the TCP idle connection. The value can be set between 4 and 30
          minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
        - ``enable_floating_ip``: Configures a virtual machine's endpoint for the floating IP capability required
          to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn
          Availability Groups in SQL server. This setting can't be changed after you create the endpoint.
        - ``disable_outbound_snat``: Configures SNAT for the VMs in the backend pool to use the public IP address
          specified in the frontend of the load balancing rule.
        - ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the load balancing rule
          object.
        - ``backend_address_pool``: Name of the backend address pool object used by the load balancing rule object.
          Inbound traffic is randomly load balanced across IPs in the backend IPs.
        - ``probe``: Name of the probe object used by the load balancing rule object.

    :param inbound_nat_rules:
        An optional list of dictionaries representing valid InboundNatRule objects. Defining inbound NAT rules on
        your load balancer is mutually exclusive with defining an inbound NAT pool. Inbound NAT pools are referenced
        from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference
        an Inbound NAT pool. They have to reference individual inbound NAT rules. Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the inbound NAT rule
          object.
        - ``protocol``: Possible values include 'Udp', 'Tcp', or 'All'.
        - ``frontend_port``: The port for the external endpoint. Port numbers for each rule must be unique within the
          Load Balancer. Acceptable values range from 1 to 65534.
        - ``backend_port``: The port used for the internal endpoint. Acceptable values range from 1 to 65535.
        - ``idle_timeout_in_minutes``: The timeout for the TCP idle connection. The value can be set between 4 and 30
          minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
        - ``enable_floating_ip``: Configures a virtual machine's endpoint for the floating IP capability required
          to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn
          Availability Groups in SQL server. This setting can't be changed after you create the endpoint.

    :param inbound_nat_pools:
        An optional list of dictionaries representing valid InboundNatPool objects. They define an external port
        range for inbound NAT to a single backend port on NICs associated with a load balancer. Inbound NAT rules are
        created automatically for each NIC associated with the Load Balancer using an external port from this range.
        Defining an Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound NAT rules.
        Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual
        virtual machines cannot reference an inbound NAT pool. They have to reference individual inbound NAT rules.
        Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the inbound NAT pool
          object.
        - ``protocol``: Possible values include 'Udp', 'Tcp', or 'All'.
        - ``frontend_port_range_start``: The first port number in the range of external ports that will be used to
          provide Inbound NAT to NICs associated with a load balancer. Acceptable values range between 1 and 65534.
        - ``frontend_port_range_end``: The last port number in the range of external ports that will be used to
          provide Inbound NAT to NICs associated with a load balancer. Acceptable values range between 1 and 65535.
        - ``backend_port``: The port used for internal connections to the endpoint. Acceptable values are between 1
          and 65535.

    :param outbound_nat_rules:
        An optional list of dictionaries representing valid OutboundNatRule objects. Valid parameters are:

        - ``name``: The name of the resource that is unique within a resource group.
        - ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the outbound NAT rule
          object.
        - ``backend_address_pool``: Name of the backend address pool object used by the outbound NAT rule object.
          Outbound traffic is randomly load balanced across IPs in the backend IPs.
        - ``allocated_outbound_ports``: The number of outbound ports to be used for NAT.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure load balancer exists:
            azurearm_network.load_balancer_present:
                - name: lb1
                - resource_group: group1
                - location: eastus
                - frontend_ip_configurations:
                  - name: lb1_feip1
                    public_ip_address: pub_ip1
                - backend_address_pools:
                  - name: lb1_bepool1
                - probes:
                  - name: lb1_webprobe1
                    protocol: tcp
                    port: 80
                    interval_in_seconds: 5
                    number_of_probes: 2
                - load_balancing_rules:
                  - name: lb1_webprobe1
                    protocol: tcp
                    frontend_port: 80
                    backend_port: 80
                    idle_timeout_in_minutes: 4
                    frontend_ip_configuration: lb1_feip1
                    backend_address_pool: lb1_bepool1
                    probe: lb1_webprobe1
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
                  - azurearm_network: Ensure public IP exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # The API expects the SKU as a dictionary with a capitalized name.
    if sku:
        sku = {'name': sku.capitalize()}

    load_bal = __salt__['azurearm_network.load_balancer_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in load_bal:
        # tag changes
        tag_diff = __utils__['dictdiffer.deep_diff'](load_bal.get('tags', {}), tags or {})
        if tag_diff:
            ret['changes']['tags'] = tag_diff

        # sku changes
        if sku:
            sku_diff = __utils__['dictdiffer.deep_diff'](load_bal.get('sku', {}), sku)
            if sku_diff:
                ret['changes']['sku'] = sku_diff

        # Every list-of-dicts parameter is compared the same way; the third
        # element names any keys holding resource references which need to be
        # normalized before comparison (None means a plain comparison).
        list_params = (
            ('frontend_ip_configurations', frontend_ip_configurations, ['public_ip_address', 'subnet']),
            ('backend_address_pools', backend_address_pools, None),
            ('probes', probes, None),
            ('load_balancing_rules', load_balancing_rules,
             ['frontend_ip_configuration', 'backend_address_pool', 'probe']),
            ('inbound_nat_rules', inbound_nat_rules, ['frontend_ip_configuration']),
            ('inbound_nat_pools', inbound_nat_pools, ['frontend_ip_configuration']),
            ('outbound_nat_rules', outbound_nat_rules, ['frontend_ip_configuration']),
        )

        for param_name, desired, ref_keys in list_params:
            if not desired:
                continue
            cmp_args = [load_bal.get(param_name, []), desired]
            if ref_keys:
                cmp_args.append(ref_keys)
            comp_ret = __utils__['azurearm.compare_list_of_dicts'](*cmp_args)
            if comp_ret.get('comment'):
                ret['comment'] = '"{0}" {1}'.format(param_name, comp_ret['comment'])
                return ret
            if comp_ret.get('changes'):
                ret['changes'][param_name] = comp_ret['changes']

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Load balancer {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Load balancer {0} would be updated.'.format(name)
            return ret
    else:
        # The load balancer does not exist yet, so everything is a new change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'sku': sku,
                'tags': tags,
                'frontend_ip_configurations': frontend_ip_configurations,
                'backend_address_pools': backend_address_pools,
                'load_balancing_rules': load_balancing_rules,
                'probes': probes,
                'inbound_nat_rules': inbound_nat_rules,
                'inbound_nat_pools': inbound_nat_pools,
                'outbound_nat_rules': outbound_nat_rules,
            }
        }

        if __opts__['test']:
            ret['comment'] = 'Load balancer {0} would be created.'.format(name)
            ret['result'] = None
            return ret

    lb_kwargs = kwargs.copy()
    lb_kwargs.update(connection_auth)

    load_bal = __salt__['azurearm_network.load_balancer_create_or_update'](
        name=name,
        resource_group=resource_group,
        sku=sku,
        tags=tags,
        frontend_ip_configurations=frontend_ip_configurations,
        backend_address_pools=backend_address_pools,
        load_balancing_rules=load_balancing_rules,
        probes=probes,
        inbound_nat_rules=inbound_nat_rules,
        inbound_nat_pools=inbound_nat_pools,
        outbound_nat_rules=outbound_nat_rules,
        **lb_kwargs
    )

    if 'error' not in load_bal:
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create load balancer {0}! ({1})'.format(name, load_bal.get('error'))
    return ret
def load_balancer_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a load balancer does not exist in the resource group.

    :param name: Name of the load balancer.
    :param resource_group: The resource group assigned to the load balancer.
    :param connection_auth: A dict with subscription and authentication parameters to be used
        in connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    load_bal = __salt__['azurearm_network.load_balancer_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the load balancer is already absent.
    if 'error' in load_bal:
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} was not found.'.format(name)
        return ret

    # Test mode: report the pending deletion without performing it.
    if __opts__['test']:
        ret['comment'] = 'Load balancer {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {'old': load_bal, 'new': {}}
        return ret

    deleted = __salt__['azurearm_network.load_balancer_delete'](name, resource_group, **connection_auth)

    if deleted:
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} has been deleted.'.format(name)
        ret['changes'] = {'old': load_bal, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete load balancer {0}!'.format(name)
    return ret
def public_ip_address_present(name, resource_group, tags=None, sku=None, public_ip_allocation_method=None,
                              public_ip_address_version=None, dns_settings=None, idle_timeout_in_minutes=None,
                              connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a public IP address exists.

    :param name: Name of the public IP address.
    :param resource_group: The resource group assigned to the public IP address.
    :param dns_settings: Optional dict matching a PublicIPAddressDnsSettings object; recognised
        keys are 'domain_name_label' and 'reverse_fqdn' (both strings).
    :param sku: The public IP address SKU, which can be 'Basic' or 'Standard'.
    :param public_ip_allocation_method: The public IP allocation method, 'Static' or 'Dynamic'.
    :param public_ip_address_version: The public IP address version, 'IPv4' or 'IPv6'.
    :param idle_timeout_in_minutes: Integer idle timeout of the public IP address.
    :param tags: Dictionary of string tag metadata for the public IP address object.
    :param connection_auth: A dict with subscription and authentication parameters to be used
        in connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Normalize the SKU string into the dict shape the execution module expects.
    if sku:
        sku = {'name': sku.capitalize()}

    pub_ip = __salt__['azurearm_network.public_ip_address_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in pub_ip:
        # Existing object: diff each managed property into ret['changes'].
        tag_diff = __utils__['dictdiffer.deep_diff'](pub_ip.get('tags', {}), tags or {})
        if tag_diff:
            ret['changes']['tags'] = tag_diff

        if dns_settings:
            if not isinstance(dns_settings, dict):
                ret['comment'] = 'DNS settings must be provided as a dictionary!'
                return ret
            current_dns = pub_ip.get('dns_settings', {})
            if any(dns_settings[key] != current_dns.get(key) for key in dns_settings):
                ret['changes']['dns_settings'] = {
                    'old': pub_ip.get('dns_settings'),
                    'new': dns_settings
                }

        if sku:
            sku_diff = __utils__['dictdiffer.deep_diff'](pub_ip.get('sku', {}), sku)
            if sku_diff:
                ret['changes']['sku'] = sku_diff

        # pylint: disable=line-too-long
        if public_ip_allocation_method and public_ip_allocation_method.capitalize() != pub_ip.get('public_ip_allocation_method'):
            ret['changes']['public_ip_allocation_method'] = {
                'old': pub_ip.get('public_ip_allocation_method'),
                'new': public_ip_allocation_method
            }

        # pylint: disable=line-too-long
        if public_ip_address_version and public_ip_address_version.lower() != pub_ip.get('public_ip_address_version', '').lower():
            ret['changes']['public_ip_address_version'] = {
                'old': pub_ip.get('public_ip_address_version'),
                'new': public_ip_address_version
            }

        if idle_timeout_in_minutes and int(idle_timeout_in_minutes) != pub_ip.get('idle_timeout_in_minutes'):
            ret['changes']['idle_timeout_in_minutes'] = {
                'old': pub_ip.get('idle_timeout_in_minutes'),
                'new': idle_timeout_in_minutes
            }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Public IP address {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Public IP address {0} would be updated.'.format(name)
            return ret
    else:
        # New object: everything requested counts as a change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'tags': tags,
                'dns_settings': dns_settings,
                'sku': sku,
                'public_ip_allocation_method': public_ip_allocation_method,
                'public_ip_address_version': public_ip_address_version,
                'idle_timeout_in_minutes': idle_timeout_in_minutes,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Public IP address {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    pub_ip_kwargs = kwargs.copy()
    pub_ip_kwargs.update(connection_auth)

    pub_ip = __salt__['azurearm_network.public_ip_address_create_or_update'](
        name=name,
        resource_group=resource_group,
        sku=sku,
        tags=tags,
        dns_settings=dns_settings,
        public_ip_allocation_method=public_ip_allocation_method,
        public_ip_address_version=public_ip_address_version,
        idle_timeout_in_minutes=idle_timeout_in_minutes,
        **pub_ip_kwargs
    )

    if 'error' not in pub_ip:
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create public IP address {0}! ({1})'.format(name, pub_ip.get('error'))
    return ret
def public_ip_address_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a public IP address does not exist in the resource group.

    :param name: Name of the public IP address.
    :param resource_group: The resource group assigned to the public IP address.
    :param connection_auth: A dict with subscription and authentication parameters to be used
        in connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    pub_ip = __salt__['azurearm_network.public_ip_address_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the address is already absent.
    if 'error' in pub_ip:
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} was not found.'.format(name)
        return ret

    # Test mode: report the pending deletion without performing it.
    if __opts__['test']:
        ret['comment'] = 'Public IP address {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {'old': pub_ip, 'new': {}}
        return ret

    deleted = __salt__['azurearm_network.public_ip_address_delete'](name, resource_group, **connection_auth)

    if deleted:
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} has been deleted.'.format(name)
        ret['changes'] = {'old': pub_ip, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete public IP address {0}!'.format(name)
    return ret
def network_interface_present(name, ip_configurations, subnet, virtual_network, resource_group, tags=None,
                              virtual_machine=None, network_security_group=None, dns_settings=None, mac_address=None,
                              primary=None, enable_accelerated_networking=None, enable_ip_forwarding=None,
                              connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network interface exists.

    :param name:
        Name of the network interface.
    :param ip_configurations:
        A list of dictionaries representing valid NetworkInterfaceIPConfiguration objects. The 'name' key is required
        at minimum. At least one IP Configuration must be present.
    :param subnet:
        Name of the existing subnet assigned to the network interface.
    :param virtual_network:
        Name of the existing virtual network containing the subnet.
    :param resource_group:
        The resource group assigned to the virtual network.
    :param tags:
        A dictionary of strings can be passed as tag metadata to the network interface object.
    :param network_security_group:
        The name of the existing network security group to assign to the network interface.
    :param virtual_machine:
        The name of the existing virtual machine to assign to the network interface.
    :param dns_settings:
        An optional dictionary representing a valid NetworkInterfaceDnsSettings object. Valid parameters are
        'dns_servers', 'internal_dns_name_label', 'internal_fqdn', and 'internal_domain_name_suffix'.
    :param mac_address:
        Optional string containing the MAC address of the network interface.
    :param primary:
        Optional boolean allowing the interface to be set as the primary network interface on a virtual machine
        with multiple interfaces attached.
    :param enable_accelerated_networking:
        Optional boolean indicating whether accelerated networking should be enabled for the interface.
    :param enable_ip_forwarding:
        Optional boolean indicating whether IP forwarding should be enabled for the interface.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Fetch the current state of the interface; an 'error' key means it does not exist yet.
    iface = __salt__['azurearm_network.network_interface_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in iface:
        # Existing interface: diff every managed property into ret['changes'] before deciding
        # whether an update call is needed.

        # tag changes
        tag_changes = __utils__['dictdiffer.deep_diff'](iface.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        # mac_address changes
        if mac_address and (mac_address != iface.get('mac_address')):
            ret['changes']['mac_address'] = {
                'old': iface.get('mac_address'),
                'new': mac_address
            }

        # primary changes (defaults the remote value to True when absent)
        if primary is not None:
            if primary != iface.get('primary', True):
                ret['changes']['primary'] = {
                    'old': iface.get('primary'),
                    'new': primary
                }

        # enable_accelerated_networking changes
        if enable_accelerated_networking is not None:
            if enable_accelerated_networking != iface.get('enable_accelerated_networking'):
                ret['changes']['enable_accelerated_networking'] = {
                    'old': iface.get('enable_accelerated_networking'),
                    'new': enable_accelerated_networking
                }

        # enable_ip_forwarding changes
        if enable_ip_forwarding is not None:
            if enable_ip_forwarding != iface.get('enable_ip_forwarding'):
                ret['changes']['enable_ip_forwarding'] = {
                    'old': iface.get('enable_ip_forwarding'),
                    'new': enable_ip_forwarding
                }

        # network_security_group changes — compare by the trailing name segment of the resource ID
        nsg_name = None
        if iface.get('network_security_group'):
            nsg_name = iface['network_security_group']['id'].split('/')[-1]

        if network_security_group and (network_security_group != nsg_name):
            ret['changes']['network_security_group'] = {
                'old': nsg_name,
                'new': network_security_group
            }

        # virtual_machine changes — same trailing-ID-segment comparison as the NSG
        vm_name = None
        if iface.get('virtual_machine'):
            vm_name = iface['virtual_machine']['id'].split('/')[-1]

        if virtual_machine and (virtual_machine != vm_name):
            ret['changes']['virtual_machine'] = {
                'old': vm_name,
                'new': virtual_machine
            }

        # dns_settings changes — report the whole dict as changed on the first differing key
        if dns_settings:
            if not isinstance(dns_settings, dict):
                ret['comment'] = 'DNS settings must be provided as a dictionary!'
                return ret

            for key in dns_settings:
                # NOTE(review): .lower() assumes string values; a list value such as
                # 'dns_servers' would raise AttributeError here — confirm intended usage.
                if dns_settings[key].lower() != iface.get('dns_settings', {}).get(key, '').lower():
                    ret['changes']['dns_settings'] = {
                        'old': iface.get('dns_settings'),
                        'new': dns_settings
                    }
                    break

        # ip_configurations changes — delegated to the shared list-of-dicts comparator
        comp_ret = __utils__['azurearm.compare_list_of_dicts'](
            iface.get('ip_configurations', []),
            ip_configurations,
            ['public_ip_address', 'subnet']
        )

        if comp_ret.get('comment'):
            ret['comment'] = '"ip_configurations" {0}'.format(comp_ret['comment'])
            return ret

        if comp_ret.get('changes'):
            ret['changes']['ip_configurations'] = comp_ret['changes']

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Network interface {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Network interface {0} would be updated.'.format(name)
            return ret

    else:
        # Interface does not exist yet: every requested property is a change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'ip_configurations': ip_configurations,
                'dns_settings': dns_settings,
                'network_security_group': network_security_group,
                'virtual_machine': virtual_machine,
                'enable_accelerated_networking': enable_accelerated_networking,
                'enable_ip_forwarding': enable_ip_forwarding,
                'mac_address': mac_address,
                'primary': primary,
                'tags': tags,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Network interface {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Merge caller kwargs with the auth parameters for the execution-module call.
    iface_kwargs = kwargs.copy()
    iface_kwargs.update(connection_auth)

    iface = __salt__['azurearm_network.network_interface_create_or_update'](
        name=name,
        subnet=subnet,
        virtual_network=virtual_network,
        resource_group=resource_group,
        ip_configurations=ip_configurations,
        dns_settings=dns_settings,
        enable_accelerated_networking=enable_accelerated_networking,
        enable_ip_forwarding=enable_ip_forwarding,
        mac_address=mac_address,
        primary=primary,
        network_security_group=network_security_group,
        virtual_machine=virtual_machine,
        tags=tags,
        **iface_kwargs
    )

    if 'error' not in iface:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create network interface {0}! ({1})'.format(name, iface.get('error'))
    return ret
def network_interface_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network interface does not exist in the resource group.

    :param name:
        Name of the network interface.
    :param resource_group:
        The resource group assigned to the network interface.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    iface = __salt__['azurearm_network.network_interface_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the interface is already absent.
    if 'error' in iface:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} was not found.'.format(name)
        return ret
    elif __opts__['test']:
        # Test mode: report the pending deletion without performing it.
        ret['comment'] = 'Network interface {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {
            'old': iface,
            'new': {},
        }
        return ret

    deleted = __salt__['azurearm_network.network_interface_delete'](name, resource_group, **connection_auth)

    if deleted:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} has been deleted.'.format(name)
        ret['changes'] = {
            'old': iface,
            'new': {}
        }
        return ret

    # Fix: removed the stray ')' that previously trailed this failure message,
    # matching the other *_absent states in this module.
    ret['comment'] = 'Failed to delete network interface {0}!'.format(name)
    return ret
def route_table_present(name, resource_group, tags=None, routes=None, disable_bgp_route_propagation=None,
                        connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route table exists.

    :param name: Name of the route table.
    :param resource_group: The resource group assigned to the route table.
    :param routes: Optional list of dicts representing valid Route objects. The routes are only
        managed if this parameter is present; when absent, implemented routes are not removed and
        merely become unmanaged.
    :param disable_bgp_route_propagation: Optional bool setting whether to disable the routes
        learned by BGP on the route table.
    :param tags: Dictionary of string tag metadata for the route table object.
    :param connection_auth: A dict with subscription and authentication parameters to be used
        in connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    rt_tbl = __salt__['azurearm_network.route_table_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in rt_tbl:
        # Existing table: diff each managed property into ret['changes'].
        tag_diff = __utils__['dictdiffer.deep_diff'](rt_tbl.get('tags', {}), tags or {})
        if tag_diff:
            ret['changes']['tags'] = tag_diff

        # pylint: disable=line-too-long
        if disable_bgp_route_propagation and (disable_bgp_route_propagation != rt_tbl.get('disable_bgp_route_propagation')):
            ret['changes']['disable_bgp_route_propagation'] = {
                'old': rt_tbl.get('disable_bgp_route_propagation'),
                'new': disable_bgp_route_propagation
            }

        if routes:
            route_diff = __utils__['azurearm.compare_list_of_dicts'](rt_tbl.get('routes', []), routes)
            if route_diff.get('comment'):
                ret['comment'] = '"routes" {0}'.format(route_diff['comment'])
                return ret
            if route_diff.get('changes'):
                ret['changes']['routes'] = route_diff['changes']

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Route table {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Route table {0} would be updated.'.format(name)
            return ret
    else:
        # New table: everything requested counts as a change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'tags': tags,
                'routes': routes,
                'disable_bgp_route_propagation': disable_bgp_route_propagation,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Route table {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    rt_tbl_kwargs = kwargs.copy()
    rt_tbl_kwargs.update(connection_auth)

    rt_tbl = __salt__['azurearm_network.route_table_create_or_update'](
        name=name,
        resource_group=resource_group,
        disable_bgp_route_propagation=disable_bgp_route_propagation,
        routes=routes,
        tags=tags,
        **rt_tbl_kwargs
    )

    if 'error' not in rt_tbl:
        ret['result'] = True
        ret['comment'] = 'Route table {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create route table {0}! ({1})'.format(name, rt_tbl.get('error'))
    return ret
def route_table_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route table does not exist in the resource group.

    :param name: Name of the route table.
    :param resource_group: The resource group assigned to the route table.
    :param connection_auth: A dict with subscription and authentication parameters to be used
        in connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    rt_tbl = __salt__['azurearm_network.route_table_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the route table is already absent.
    if 'error' in rt_tbl:
        ret['result'] = True
        ret['comment'] = 'Route table {0} was not found.'.format(name)
        return ret

    # Test mode: report the pending deletion without performing it.
    if __opts__['test']:
        ret['comment'] = 'Route table {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {'old': rt_tbl, 'new': {}}
        return ret

    deleted = __salt__['azurearm_network.route_table_delete'](name, resource_group, **connection_auth)

    if deleted:
        ret['result'] = True
        ret['comment'] = 'Route table {0} has been deleted.'.format(name)
        ret['changes'] = {'old': rt_tbl, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete route table {0}!'.format(name)
    return ret
def route_present(name, address_prefix, next_hop_type, route_table, resource_group, next_hop_ip_address=None,
                  connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route exists within a route table.

    :param name: Name of the route.
    :param address_prefix: The destination CIDR to which the route applies.
    :param next_hop_type: The type of Azure hop the packet should be sent to. Possible values are:
        'VirtualNetworkGateway', 'VnetLocal', 'Internet', 'VirtualAppliance', and 'None'.
    :param next_hop_ip_address: The IP address packets should be forwarded to. Only allowed when
        the next hop type is 'VirtualAppliance'.
    :param route_table: The name of the existing route table which will contain the route.
    :param resource_group: The resource group assigned to the route table.
    :param connection_auth: A dict with subscription and authentication parameters to be used
        in connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    route = __salt__['azurearm_network.route_get'](
        name,
        route_table,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in route:
        # Existing route: diff each managed property into ret['changes'].
        if address_prefix != route.get('address_prefix'):
            ret['changes']['address_prefix'] = {
                'old': route.get('address_prefix'),
                'new': address_prefix
            }

        if next_hop_type.lower() != route.get('next_hop_type', '').lower():
            ret['changes']['next_hop_type'] = {
                'old': route.get('next_hop_type'),
                'new': next_hop_type
            }

        # The next hop IP is only meaningful for virtual appliance hops.
        if next_hop_type.lower() == 'virtualappliance' and next_hop_ip_address != route.get('next_hop_ip_address'):
            ret['changes']['next_hop_ip_address'] = {
                'old': route.get('next_hop_ip_address'),
                'new': next_hop_ip_address
            }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Route {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Route {0} would be updated.'.format(name)
            return ret
    else:
        # New route: everything requested counts as a change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'address_prefix': address_prefix,
                'next_hop_type': next_hop_type,
                'next_hop_ip_address': next_hop_ip_address
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Route {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    route_kwargs = kwargs.copy()
    route_kwargs.update(connection_auth)

    route = __salt__['azurearm_network.route_create_or_update'](
        name=name,
        route_table=route_table,
        resource_group=resource_group,
        address_prefix=address_prefix,
        next_hop_type=next_hop_type,
        next_hop_ip_address=next_hop_ip_address,
        **route_kwargs
    )

    if 'error' not in route:
        ret['result'] = True
        ret['comment'] = 'Route {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create route {0}! ({1})'.format(name, route.get('error'))
    return ret
def route_absent(name, route_table, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route does not exist within a route table.

    :param name:
        Name of the route.
    :param route_table:
        The name of the existing route table containing the route.
    :param resource_group:
        The resource group assigned to the route table.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    route = __salt__['azurearm_network.route_get'](
        name,
        route_table,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the route is already absent.
    if 'error' in route:
        ret['result'] = True
        ret['comment'] = 'Route {0} was not found.'.format(name)
        return ret
    elif __opts__['test']:
        # Test mode: report the pending deletion without performing it.
        ret['comment'] = 'Route {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {
            'old': route,
            'new': {},
        }
        return ret

    deleted = __salt__['azurearm_network.route_delete'](name, route_table, resource_group, **connection_auth)

    if deleted:
        ret['result'] = True
        ret['comment'] = 'Route {0} has been deleted.'.format(name)
        ret['changes'] = {
            'old': route,
            'new': {}
        }
        return ret

    ret['comment'] = 'Failed to delete route {0}!'.format(name)
    return ret
|
saltstack/salt
|
salt/states/azurearm_network.py
|
network_security_group_present
|
python
|
def network_security_group_present(name, resource_group, tags=None, security_rules=None, connection_auth=None,
                                   **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network security group exists.

    :param name: Name of the network security group.
    :param resource_group: The resource group assigned to the network security group.
    :param tags: Dictionary of string tag metadata for the network security group object.
    :param security_rules: Optional list of dicts representing valid SecurityRule objects. The
        rules are only managed if this parameter is present; when absent, implemented rules are
        not removed and merely become unmanaged.
    :param connection_auth: A dict with subscription and authentication parameters to be used
        in connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    nsg = __salt__['azurearm_network.network_security_group_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in nsg:
        # Existing group: diff tags and (when supplied) the security rules.
        tag_diff = __utils__['dictdiffer.deep_diff'](nsg.get('tags', {}), tags or {})
        if tag_diff:
            ret['changes']['tags'] = tag_diff

        if security_rules:
            rule_diff = __utils__['azurearm.compare_list_of_dicts'](nsg.get('security_rules', []), security_rules)
            if rule_diff.get('comment'):
                ret['comment'] = '"security_rules" {0}'.format(rule_diff['comment'])
                return ret
            if rule_diff.get('changes'):
                ret['changes']['security_rules'] = rule_diff['changes']

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Network security group {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Network security group {0} would be updated.'.format(name)
            return ret
    else:
        # New group: everything requested counts as a change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'resource_group': resource_group,
                'tags': tags,
                'security_rules': security_rules,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Network security group {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    nsg_kwargs = kwargs.copy()
    nsg_kwargs.update(connection_auth)

    nsg = __salt__['azurearm_network.network_security_group_create_or_update'](
        name=name,
        resource_group=resource_group,
        tags=tags,
        security_rules=security_rules,
        **nsg_kwargs
    )

    if 'error' not in nsg:
        ret['result'] = True
        ret['comment'] = 'Network security group {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create network security group {0}! ({1})'.format(name, nsg.get('error'))
    return ret
|
.. versionadded:: 2019.2.0
Ensure a network security group exists.
:param name:
Name of the network security group.
:param resource_group:
The resource group assigned to the network security group.
:param tags:
A dictionary of strings can be passed as tag metadata to the network security group object.
:param security_rules: An optional list of dictionaries representing valid SecurityRule objects. See the
documentation for the security_rule_present state or security_rule_create_or_update execution module
for more information on required and optional parameters for security rules. The rules are only
managed if this parameter is present. When this parameter is absent, implemented rules will not be removed,
and will merely become unmanaged.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure network security group exists:
azurearm_network.network_security_group_present:
- name: nsg1
- resource_group: group1
- security_rules:
- name: nsg1_rule1
priority: 100
protocol: tcp
access: allow
direction: outbound
source_address_prefix: virtualnetwork
destination_address_prefix: internet
source_port_range: '*'
destination_port_range: '*'
- name: nsg1_rule2
priority: 101
protocol: tcp
access: allow
direction: inbound
source_address_prefix: internet
destination_address_prefix: virtualnetwork
source_port_range: '*'
destination_port_ranges:
- '80'
- '443'
- tags:
contact_name: Elmer Fudd Gantry
- connection_auth: {{ profile }}
- require:
- azurearm_resource: Ensure resource group exists
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/azurearm_network.py#L533-L671
| null |
# -*- coding: utf-8 -*-
'''
Azure (ARM) Network State Module
.. versionadded:: 2019.2.0
:maintainer: <devops@decisionlab.io>
:maturity: new
:depends:
* `azure <https://pypi.python.org/pypi/azure>`_ >= 2.0.0
* `azure-common <https://pypi.python.org/pypi/azure-common>`_ >= 1.1.8
* `azure-mgmt <https://pypi.python.org/pypi/azure-mgmt>`_ >= 1.0.0
* `azure-mgmt-compute <https://pypi.python.org/pypi/azure-mgmt-compute>`_ >= 1.0.0
* `azure-mgmt-network <https://pypi.python.org/pypi/azure-mgmt-network>`_ >= 1.7.1
* `azure-mgmt-resource <https://pypi.python.org/pypi/azure-mgmt-resource>`_ >= 1.1.0
* `azure-mgmt-storage <https://pypi.python.org/pypi/azure-mgmt-storage>`_ >= 1.0.0
* `azure-mgmt-web <https://pypi.python.org/pypi/azure-mgmt-web>`_ >= 0.32.0
* `azure-storage <https://pypi.python.org/pypi/azure-storage>`_ >= 0.34.3
* `msrestazure <https://pypi.python.org/pypi/msrestazure>`_ >= 0.4.21
:platform: linux
:configuration: This module requires Azure Resource Manager credentials to be passed as a dictionary of
keyword arguments to the ``connection_auth`` parameter in order to work properly. Since the authentication
parameters are sensitive, it's recommended to pass them to the states via pillar.
Required provider parameters:
if using username and password:
* ``subscription_id``
* ``username``
* ``password``
if using a service principal:
* ``subscription_id``
* ``tenant``
* ``client_id``
* ``secret``
Optional provider parameters:
**cloud_environment**: Used to point the cloud driver to different API endpoints, such as Azure GovCloud. Possible values:
* ``AZURE_PUBLIC_CLOUD`` (default)
* ``AZURE_CHINA_CLOUD``
* ``AZURE_US_GOV_CLOUD``
* ``AZURE_GERMAN_CLOUD``
Example Pillar for Azure Resource Manager authentication:
.. code-block:: yaml
azurearm:
user_pass_auth:
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
username: fletch
password: 123pass
mysubscription:
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
tenant: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
client_id: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
secret: XXXXXXXXXXXXXXXXXXXXXXXX
cloud_environment: AZURE_PUBLIC_CLOUD
Example states using Azure Resource Manager authentication:
.. code-block:: jinja
{% set profile = salt['pillar.get']('azurearm:mysubscription') %}
Ensure virtual network exists:
azurearm_network.virtual_network_present:
- name: my_vnet
- resource_group: my_rg
- address_prefixes:
- '10.0.0.0/8'
- '192.168.0.0/16'
- dns_servers:
- '8.8.8.8'
- tags:
how_awesome: very
contact_name: Elmer Fudd Gantry
- connection_auth: {{ profile }}
Ensure virtual network is absent:
azurearm_network.virtual_network_absent:
- name: other_vnet
- resource_group: my_rg
- connection_auth: {{ profile }}
'''
# Python libs
from __future__ import absolute_import
import logging
# Salt libs
try:
from salt.ext.six.moves import range as six_range
except ImportError:
six_range = range
__virtualname__ = 'azurearm_network'
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only make this state available if the azurearm_network module is available.
    '''
    if 'azurearm_network.check_ip_address_availability' in __salt__:
        return __virtualname__
    return False
def virtual_network_present(name, address_prefixes, resource_group, dns_servers=None,
                            tags=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a virtual network exists.

    :param name:
        Name of the virtual network.

    :param address_prefixes:
        A list of CIDR blocks which can be used by subnets within the virtual network.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param dns_servers:
        A list of DNS server addresses.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the virtual network object.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure virtual network exists:
            azurearm_network.virtual_network_present:
                - name: vnet1
                - resource_group: group1
                - address_prefixes:
                    - '10.0.0.0/8'
                    - '192.168.0.0/16'
                - dns_servers:
                    - '8.8.8.8'
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                    - azurearm_resource: Ensure resource group exists
    '''
    # Standard Salt state return structure; 'result' stays False until success.
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    # Credentials must be supplied as a dictionary (typically from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Look up any existing virtual network with this name.
    vnet = __salt__['azurearm_network.virtual_network_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in vnet:
        # The virtual network exists - compare each managed property against
        # the desired values and record the differences in ret['changes'].
        tag_changes = __utils__['dictdiffer.deep_diff'](vnet.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        # DNS servers are compared as unordered sets.
        dns_changes = set(dns_servers or []).symmetric_difference(
            set(vnet.get('dhcp_options', {}).get('dns_servers', [])))
        if dns_changes:
            ret['changes']['dns_servers'] = {
                'old': vnet.get('dhcp_options', {}).get('dns_servers', []),
                'new': dns_servers,
            }

        # Address prefixes are also compared as unordered sets.
        addr_changes = set(address_prefixes or []).symmetric_difference(
            set(vnet.get('address_space', {}).get('address_prefixes', [])))
        if addr_changes:
            ret['changes']['address_space'] = {
                'address_prefixes': {
                    'old': vnet.get('address_space', {}).get('address_prefixes', []),
                    'new': address_prefixes,
                }
            }

        # Optional protection flags may be passed through kwargs; they are
        # treated as False when not supplied.
        if kwargs.get('enable_ddos_protection', False) != vnet.get('enable_ddos_protection'):
            ret['changes']['enable_ddos_protection'] = {
                'old': vnet.get('enable_ddos_protection'),
                'new': kwargs.get('enable_ddos_protection')
            }

        if kwargs.get('enable_vm_protection', False) != vnet.get('enable_vm_protection'):
            ret['changes']['enable_vm_protection'] = {
                'old': vnet.get('enable_vm_protection'),
                'new': kwargs.get('enable_vm_protection')
            }

        if not ret['changes']:
            # Nothing differs - the state is already satisfied.
            ret['result'] = True
            ret['comment'] = 'Virtual network {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            # Dry run: report the pending update without applying it.
            ret['result'] = None
            ret['comment'] = 'Virtual network {0} would be updated.'.format(name)
            return ret

    else:
        # No existing network - everything requested will be newly created.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'resource_group': resource_group,
                'address_space': {'address_prefixes': address_prefixes},
                'dhcp_options': {'dns_servers': dns_servers},
                'enable_ddos_protection': kwargs.get('enable_ddos_protection', False),
                'enable_vm_protection': kwargs.get('enable_vm_protection', False),
                'tags': tags,
            }
        }

    if __opts__['test']:
        # Dry run: report the pending creation without applying it.
        ret['comment'] = 'Virtual network {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Merge connection parameters into the pass-through kwargs for the
    # execution module call.
    vnet_kwargs = kwargs.copy()
    vnet_kwargs.update(connection_auth)

    vnet = __salt__['azurearm_network.virtual_network_create_or_update'](
        name=name,
        resource_group=resource_group,
        address_prefixes=address_prefixes,
        dns_servers=dns_servers,
        tags=tags,
        **vnet_kwargs
    )

    if 'error' not in vnet:
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create virtual network {0}! ({1})'.format(name, vnet.get('error'))
    return ret
def virtual_network_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a virtual network does not exist in the resource group.

    :param name:
        Name of the virtual network.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Credentials are mandatory and must arrive as a dictionary.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.virtual_network_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the network is already gone - nothing to do.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        # Dry run: report the pending deletion without performing it.
        ret['result'] = None
        ret['comment'] = 'Virtual network {0} would be deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.virtual_network_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete virtual network {0}!'.format(name)
    return ret
def subnet_present(name, address_prefix, virtual_network, resource_group,
                   security_group=None, route_table=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a subnet exists.

    :param name:
        Name of the subnet.

    :param address_prefix:
        A CIDR block used by the subnet within the virtual network.

    :param virtual_network:
        Name of the existing virtual network to contain the subnet.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param security_group:
        The name of the existing network security group to assign to the subnet.

    :param route_table:
        The name of the existing route table to assign to the subnet.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure subnet exists:
            azurearm_network.subnet_present:
                - name: vnet1_sn1
                - virtual_network: vnet1
                - resource_group: group1
                - address_prefix: '192.168.1.0/24'
                - security_group: nsg1
                - route_table: rt1
                - connection_auth: {{ profile }}
                - require:
                    - azurearm_network: Ensure virtual network exists
                    - azurearm_network: Ensure network security group exists
                    - azurearm_network: Ensure route table exists
    '''
    # Standard Salt state return structure; 'result' stays False until success.
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    # Credentials must be supplied as a dictionary (typically from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Look up any existing subnet with this name in the virtual network.
    snet = __salt__['azurearm_network.subnet_get'](
        name,
        virtual_network,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in snet:
        # The subnet exists - diff each managed property against the desired
        # values and record the differences in ret['changes'].
        if address_prefix != snet.get('address_prefix'):
            ret['changes']['address_prefix'] = {
                'old': snet.get('address_prefix'),
                'new': address_prefix
            }

        # The API returns the NSG as a full resource ID; compare by the
        # trailing name component only.
        nsg_name = None
        if snet.get('network_security_group'):
            nsg_name = snet['network_security_group']['id'].split('/')[-1]

        if security_group and (security_group != nsg_name):
            ret['changes']['network_security_group'] = {
                'old': nsg_name,
                'new': security_group
            }

        # Same resource-ID-to-name comparison for the route table.
        rttbl_name = None
        if snet.get('route_table'):
            rttbl_name = snet['route_table']['id'].split('/')[-1]

        if route_table and (route_table != rttbl_name):
            ret['changes']['route_table'] = {
                'old': rttbl_name,
                'new': route_table
            }

        if not ret['changes']:
            # Nothing differs - the state is already satisfied.
            ret['result'] = True
            ret['comment'] = 'Subnet {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            # Dry run: report the pending update without applying it.
            ret['result'] = None
            ret['comment'] = 'Subnet {0} would be updated.'.format(name)
            return ret

    else:
        # No existing subnet - everything requested will be newly created.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'address_prefix': address_prefix,
                'network_security_group': security_group,
                'route_table': route_table
            }
        }

    if __opts__['test']:
        # Dry run: report the pending creation without applying it.
        ret['comment'] = 'Subnet {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Merge connection parameters into the pass-through kwargs for the
    # execution module call.
    snet_kwargs = kwargs.copy()
    snet_kwargs.update(connection_auth)

    snet = __salt__['azurearm_network.subnet_create_or_update'](
        name=name,
        virtual_network=virtual_network,
        resource_group=resource_group,
        address_prefix=address_prefix,
        network_security_group=security_group,
        route_table=route_table,
        **snet_kwargs
    )

    if 'error' not in snet:
        ret['result'] = True
        ret['comment'] = 'Subnet {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create subnet {0}! ({1})'.format(name, snet.get('error'))
    return ret
def subnet_absent(name, virtual_network, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a subnet does not exist in the virtual network.

    :param name:
        Name of the subnet.

    :param virtual_network:
        Name of the existing virtual network containing the subnet.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    # Credentials are mandatory and must arrive as a dictionary.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    snet = __salt__['azurearm_network.subnet_get'](
        name,
        virtual_network,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the subnet is already gone - nothing to do.
    if 'error' in snet:
        ret['result'] = True
        ret['comment'] = 'Subnet {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        # Dry run: report the pending deletion without performing it.
        ret['comment'] = 'Subnet {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {
            'old': snet,
            'new': {},
        }
        return ret

    deleted = __salt__['azurearm_network.subnet_delete'](name, virtual_network, resource_group, **connection_auth)

    if deleted:
        ret['result'] = True
        ret['comment'] = 'Subnet {0} has been deleted.'.format(name)
        ret['changes'] = {
            'old': snet,
            'new': {}
        }
        return ret

    ret['comment'] = 'Failed to delete subnet {0}!'.format(name)
    return ret
def network_security_group_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network security group does not exist in the resource group.

    :param name:
        Name of the network security group.

    :param resource_group:
        The resource group assigned to the network security group.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Credentials are mandatory and must arrive as a dictionary.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing_nsg = __salt__['azurearm_network.network_security_group_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the group is already gone - nothing to do.
    if 'error' in existing_nsg:
        ret['result'] = True
        ret['comment'] = 'Network security group {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        # Dry run: report the pending deletion without performing it.
        ret['result'] = None
        ret['comment'] = 'Network security group {0} would be deleted.'.format(name)
        ret['changes'] = {'old': existing_nsg, 'new': {}}
        return ret

    if __salt__['azurearm_network.network_security_group_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Network security group {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing_nsg, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete network security group {0}!'.format(name)
    return ret
def _address_prefixes_changed(local_prefixes, remote_prefixes):
    '''
    Return True if two address prefix lists differ. Elements are compared
    case-insensitively because Azure may return default tags (e.g.
    'VirtualNetwork') with different capitalization than was submitted.
    '''
    local_sorted = sorted(local_prefixes or [])
    remote_sorted = sorted(remote_prefixes or [])
    if len(local_sorted) != len(remote_sorted):
        return True
    for local_val, remote_val in zip(local_sorted, remote_sorted):
        if local_val.lower() != remote_val.lower():
            return True
    return False


def security_rule_present(name, access, direction, priority, protocol, security_group, resource_group,
                          destination_address_prefix=None, destination_port_range=None, source_address_prefix=None,
                          source_port_range=None, description=None, destination_address_prefixes=None,
                          destination_port_ranges=None, source_address_prefixes=None, source_port_ranges=None,
                          connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a security rule exists.

    :param name:
        Name of the security rule.

    :param access:
        'allow' or 'deny'

    :param direction:
        'inbound' or 'outbound'

    :param priority:
        Integer between 100 and 4096 used for ordering rule application.

    :param protocol:
        'tcp', 'udp', or '*'

    :param security_group:
        The name of the existing network security group to contain the security rule.

    :param resource_group:
        The resource group assigned to the network security group.

    :param description:
        Optional description of the security rule.

    :param destination_address_prefix:
        The CIDR or destination IP range. Asterisk '*' can also be used to match all destination IPs.
        Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.

    :param destination_port_range:
        The destination port or range. Integer or range between 0 and 65535. Asterisk '*'
        can also be used to match all ports.

    :param source_address_prefix:
        The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs.
        Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
        If this is an ingress rule, specifies where network traffic originates from.

    :param source_port_range:
        The source port or range. Integer or range between 0 and 65535. Asterisk '*'
        can also be used to match all ports.

    :param destination_address_prefixes:
        A list of destination_address_prefix values. This parameter overrides destination_address_prefix
        and will cause any value entered there to be ignored.

    :param destination_port_ranges:
        A list of destination_port_range values. This parameter overrides destination_port_range
        and will cause any value entered there to be ignored.

    :param source_address_prefixes:
        A list of source_address_prefix values. This parameter overrides source_address_prefix
        and will cause any value entered there to be ignored.

    :param source_port_ranges:
        A list of source_port_range values. This parameter overrides source_port_range
        and will cause any value entered there to be ignored.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure security rule exists:
            azurearm_network.security_rule_present:
                - name: nsg1_rule2
                - security_group: nsg1
                - resource_group: group1
                - priority: 101
                - protocol: tcp
                - access: allow
                - direction: inbound
                - source_address_prefix: internet
                - destination_address_prefix: virtualnetwork
                - source_port_range: '*'
                - destination_port_ranges:
                    - '80'
                    - '443'
                - connection_auth: {{ profile }}
                - require:
                    - azurearm_network: Ensure network security group exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    # Credentials must be supplied as a dictionary (typically from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Validate the mutually exclusive plural/singular parameter pairs. This
    # was previously done with eval()/exec(), but exec() cannot rebind
    # function locals in Python 3, so the documented "plural overrides
    # singular" behavior silently never happened. Explicit checks below
    # restore it and avoid eval/exec entirely.
    exclusive_params = [
        ('source_port_ranges', source_port_ranges, 'source_port_range', source_port_range),
        ('source_address_prefixes', source_address_prefixes, 'source_address_prefix', source_address_prefix),
        ('destination_port_ranges', destination_port_ranges, 'destination_port_range', destination_port_range),
        ('destination_address_prefixes', destination_address_prefixes, 'destination_address_prefix', destination_address_prefix),
    ]

    for plural_name, plural_value, singular_name, singular_value in exclusive_params:
        # At least one of the pair must be provided.
        if not plural_value and not singular_value:
            ret['comment'] = 'Either the {0} or {1} parameter must be provided!'.format(plural_name, singular_name)
            return ret
        # The plural form, when given, must be a list.
        if plural_value and not isinstance(plural_value, list):
            ret['comment'] = 'The {0} parameter must be a list!'.format(plural_name)
            return ret

    # The plural parameters override their singular counterparts.
    if source_port_ranges:
        source_port_range = None
    if source_address_prefixes:
        source_address_prefix = None
    if destination_port_ranges:
        destination_port_range = None
    if destination_address_prefixes:
        destination_address_prefix = None

    # Look up any existing rule with this name in the security group.
    rule = __salt__['azurearm_network.security_rule_get'](
        name,
        security_group,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in rule:
        # The rule exists - diff each managed property against the desired
        # values and record the differences in ret['changes'].

        # access changes (Azure stores 'Allow'/'Deny' capitalized)
        if access.capitalize() != rule.get('access'):
            ret['changes']['access'] = {
                'old': rule.get('access'),
                'new': access
            }

        # description changes
        if description != rule.get('description'):
            ret['changes']['description'] = {
                'old': rule.get('description'),
                'new': description
            }

        # direction changes (Azure stores 'Inbound'/'Outbound' capitalized)
        if direction.capitalize() != rule.get('direction'):
            ret['changes']['direction'] = {
                'old': rule.get('direction'),
                'new': direction
            }

        # priority changes (coerce to int since SLS data may pass a string)
        if int(priority) != rule.get('priority'):
            ret['changes']['priority'] = {
                'old': rule.get('priority'),
                'new': priority
            }

        # protocol changes (case-insensitive comparison)
        if protocol.lower() != rule.get('protocol', '').lower():
            ret['changes']['protocol'] = {
                'old': rule.get('protocol'),
                'new': protocol
            }

        # destination_port_range changes
        if destination_port_range != rule.get('destination_port_range'):
            ret['changes']['destination_port_range'] = {
                'old': rule.get('destination_port_range'),
                'new': destination_port_range
            }

        # source_port_range changes
        if source_port_range != rule.get('source_port_range'):
            ret['changes']['source_port_range'] = {
                'old': rule.get('source_port_range'),
                'new': source_port_range
            }

        # destination_port_ranges changes (order-insensitive)
        if sorted(destination_port_ranges or []) != sorted(rule.get('destination_port_ranges', [])):
            ret['changes']['destination_port_ranges'] = {
                'old': rule.get('destination_port_ranges'),
                'new': destination_port_ranges
            }

        # source_port_ranges changes (order-insensitive)
        if sorted(source_port_ranges or []) != sorted(rule.get('source_port_ranges', [])):
            ret['changes']['source_port_ranges'] = {
                'old': rule.get('source_port_ranges'),
                'new': source_port_ranges
            }

        # destination_address_prefix changes (case-insensitive)
        if (destination_address_prefix or '').lower() != rule.get('destination_address_prefix', '').lower():
            ret['changes']['destination_address_prefix'] = {
                'old': rule.get('destination_address_prefix'),
                'new': destination_address_prefix
            }

        # source_address_prefix changes (case-insensitive)
        if (source_address_prefix or '').lower() != rule.get('source_address_prefix', '').lower():
            ret['changes']['source_address_prefix'] = {
                'old': rule.get('source_address_prefix'),
                'new': source_address_prefix
            }

        # destination_address_prefixes changes (order- and case-insensitive)
        if _address_prefixes_changed(destination_address_prefixes, rule.get('destination_address_prefixes')):
            ret['changes']['destination_address_prefixes'] = {
                'old': rule.get('destination_address_prefixes'),
                'new': destination_address_prefixes
            }

        # source_address_prefixes changes (order- and case-insensitive)
        if _address_prefixes_changed(source_address_prefixes, rule.get('source_address_prefixes')):
            ret['changes']['source_address_prefixes'] = {
                'old': rule.get('source_address_prefixes'),
                'new': source_address_prefixes
            }

        if not ret['changes']:
            # Nothing differs - the state is already satisfied.
            ret['result'] = True
            ret['comment'] = 'Security rule {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            # Dry run: report the pending update without applying it.
            ret['result'] = None
            ret['comment'] = 'Security rule {0} would be updated.'.format(name)
            return ret

    else:
        # No existing rule - everything requested will be newly created.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'access': access,
                'description': description,
                'direction': direction,
                'priority': priority,
                'protocol': protocol,
                'destination_address_prefix': destination_address_prefix,
                'destination_address_prefixes': destination_address_prefixes,
                'destination_port_range': destination_port_range,
                'destination_port_ranges': destination_port_ranges,
                'source_address_prefix': source_address_prefix,
                'source_address_prefixes': source_address_prefixes,
                'source_port_range': source_port_range,
                'source_port_ranges': source_port_ranges,
            }
        }

    if __opts__['test']:
        # Dry run: report the pending creation without applying it.
        ret['comment'] = 'Security rule {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Merge connection parameters into the pass-through kwargs for the
    # execution module call.
    rule_kwargs = kwargs.copy()
    rule_kwargs.update(connection_auth)

    rule = __salt__['azurearm_network.security_rule_create_or_update'](
        name=name,
        access=access,
        description=description,
        direction=direction,
        priority=priority,
        protocol=protocol,
        security_group=security_group,
        resource_group=resource_group,
        destination_address_prefix=destination_address_prefix,
        destination_address_prefixes=destination_address_prefixes,
        destination_port_range=destination_port_range,
        destination_port_ranges=destination_port_ranges,
        source_address_prefix=source_address_prefix,
        source_address_prefixes=source_address_prefixes,
        source_port_range=source_port_range,
        source_port_ranges=source_port_ranges,
        **rule_kwargs
    )

    if 'error' not in rule:
        ret['result'] = True
        ret['comment'] = 'Security rule {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create security rule {0}! ({1})'.format(name, rule.get('error'))
    return ret
def security_rule_absent(name, security_group, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a security rule does not exist in the network security group.

    :param name:
        Name of the security rule.

    :param security_group:
        The network security group containing the security rule.

    :param resource_group:
        The resource group assigned to the network security group.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Credentials are mandatory and must arrive as a dictionary.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing_rule = __salt__['azurearm_network.security_rule_get'](
        name,
        security_group,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the rule is already gone - nothing to do.
    if 'error' in existing_rule:
        ret['result'] = True
        ret['comment'] = 'Security rule {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        # Dry run: report the pending deletion without performing it.
        ret['result'] = None
        ret['comment'] = 'Security rule {0} would be deleted.'.format(name)
        ret['changes'] = {'old': existing_rule, 'new': {}}
        return ret

    if __salt__['azurearm_network.security_rule_delete'](name, security_group, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Security rule {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing_rule, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete security rule {0}!'.format(name)
    return ret
def load_balancer_present(name, resource_group, sku=None, frontend_ip_configurations=None, backend_address_pools=None,
                          load_balancing_rules=None, probes=None, inbound_nat_rules=None, inbound_nat_pools=None,
                          outbound_nat_rules=None, tags=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a load balancer exists.

    :param name: Name of the load balancer.
    :param resource_group: The resource group assigned to the load balancer.
    :param sku: The load balancer SKU, which can be 'Basic' or 'Standard'.
    :param tags: A dictionary of strings can be passed as tag metadata to the load balancer object.
    :param frontend_ip_configurations: An optional list of dictionaries representing valid
        FrontendIPConfiguration objects. A frontend IP configuration can be either private (using
        'private_ip_address' and 'subnet' parameters) or public (using a 'public_ip_address'
        reference). Each dictionary supports 'name', 'private_ip_address',
        'private_ip_allocation_method' ('Static' or 'Dynamic'), 'subnet' and 'public_ip_address'.
    :param backend_address_pools: An optional list of dictionaries representing valid
        BackendAddressPool objects. Only the 'name' parameter is valid; all other parameters are
        read-only references from other objects linking to the pool.
    :param probes: An optional list of dictionaries representing valid Probe objects. Each
        dictionary supports 'name', 'protocol' ('Http' or 'Tcp'), 'port', 'interval_in_seconds',
        'number_of_probes' and 'request_path' (required when protocol is 'Http').
    :param load_balancing_rules: An optional list of dictionaries representing valid
        LoadBalancingRule objects. Each dictionary supports 'name', 'load_distribution',
        'frontend_port', 'backend_port', 'idle_timeout_in_minutes', 'enable_floating_ip',
        'disable_outbound_snat', 'frontend_ip_configuration', 'backend_address_pool' and 'probe'.
    :param inbound_nat_rules: An optional list of dictionaries representing valid InboundNatRule
        objects. Mutually exclusive with inbound NAT pools. Each dictionary supports 'name',
        'frontend_ip_configuration', 'protocol', 'frontend_port', 'backend_port',
        'idle_timeout_in_minutes' and 'enable_floating_ip'.
    :param inbound_nat_pools: An optional list of dictionaries representing valid InboundNatPool
        objects. Mutually exclusive with inbound NAT rules. Each dictionary supports 'name',
        'frontend_ip_configuration', 'protocol', 'frontend_port_range_start',
        'frontend_port_range_end' and 'backend_port'.
    :param outbound_nat_rules: An optional list of dictionaries representing valid OutboundNatRule
        objects. Each dictionary supports 'name', 'frontend_ip_configuration',
        'backend_address_pool' and 'allocated_outbound_ports'.
    :param connection_auth: A dict with subscription and authentication parameters to be used in
        connecting to the Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure load balancer exists:
            azurearm_network.load_balancer_present:
                - name: lb1
                - resource_group: group1
                - location: eastus
                - frontend_ip_configurations:
                  - name: lb1_feip1
                    public_ip_address: pub_ip1
                - backend_address_pools:
                  - name: lb1_bepool1
                - probes:
                  - name: lb1_webprobe1
                    protocol: tcp
                    port: 80
                    interval_in_seconds: 5
                    number_of_probes: 2
                - load_balancing_rules:
                  - name: lb1_webprobe1
                    protocol: tcp
                    frontend_port: 80
                    backend_port: 80
                    idle_timeout_in_minutes: 4
                    frontend_ip_configuration: lb1_feip1
                    backend_address_pool: lb1_bepool1
                    probe: lb1_webprobe1
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
                  - azurearm_network: Ensure public IP exists
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    if sku:
        sku = {'name': sku.capitalize()}

    load_bal = __salt__['azurearm_network.load_balancer_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in load_bal:
        # The load balancer exists: diff each managed property against the
        # desired state and collect everything that would change.
        tag_diff = __utils__['dictdiffer.deep_diff'](load_bal.get('tags', {}), tags or {})
        if tag_diff:
            ret['changes']['tags'] = tag_diff

        if sku:
            sku_diff = __utils__['dictdiffer.deep_diff'](load_bal.get('sku', {}), sku)
            if sku_diff:
                ret['changes']['sku'] = sku_diff

        # Every list-of-dicts parameter is compared the same way; only the
        # names of nested objects matched by resource name differ per key.
        list_params = (
            ('frontend_ip_configurations', frontend_ip_configurations, ['public_ip_address', 'subnet']),
            ('backend_address_pools', backend_address_pools, None),
            ('probes', probes, None),
            ('load_balancing_rules', load_balancing_rules,
             ['frontend_ip_configuration', 'backend_address_pool', 'probe']),
            ('inbound_nat_rules', inbound_nat_rules, ['frontend_ip_configuration']),
            ('inbound_nat_pools', inbound_nat_pools, ['frontend_ip_configuration']),
            ('outbound_nat_rules', outbound_nat_rules, ['frontend_ip_configuration']),
        )
        for key, desired, id_keys in list_params:
            if not desired:
                continue
            if id_keys:
                comp = __utils__['azurearm.compare_list_of_dicts'](load_bal.get(key, []), desired, id_keys)
            else:
                comp = __utils__['azurearm.compare_list_of_dicts'](load_bal.get(key, []), desired)
            if comp.get('comment'):
                ret['comment'] = '"{0}" {1}'.format(key, comp['comment'])
                return ret
            if comp.get('changes'):
                ret['changes'][key] = comp['changes']

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Load balancer {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Load balancer {0} would be updated.'.format(name)
            return ret
    else:
        # The load balancer does not exist yet; everything is new.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'sku': sku,
                'tags': tags,
                'frontend_ip_configurations': frontend_ip_configurations,
                'backend_address_pools': backend_address_pools,
                'load_balancing_rules': load_balancing_rules,
                'probes': probes,
                'inbound_nat_rules': inbound_nat_rules,
                'inbound_nat_pools': inbound_nat_pools,
                'outbound_nat_rules': outbound_nat_rules,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Load balancer {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    lb_kwargs = dict(kwargs)
    lb_kwargs.update(connection_auth)

    load_bal = __salt__['azurearm_network.load_balancer_create_or_update'](
        name=name,
        resource_group=resource_group,
        sku=sku,
        tags=tags,
        frontend_ip_configurations=frontend_ip_configurations,
        backend_address_pools=backend_address_pools,
        load_balancing_rules=load_balancing_rules,
        probes=probes,
        inbound_nat_rules=inbound_nat_rules,
        inbound_nat_pools=inbound_nat_pools,
        outbound_nat_rules=outbound_nat_rules,
        **lb_kwargs
    )

    if 'error' not in load_bal:
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create load balancer {0}! ({1})'.format(name, load_bal.get('error'))
    return ret
def load_balancer_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a load balancer does not exist in the resource group.

    :param name: Name of the load balancer.
    :param resource_group: The resource group assigned to the load balancer.
    :param connection_auth: A dict with subscription and authentication parameters to be used in
        connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    load_bal = __salt__['azurearm_network.load_balancer_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # An error from the lookup means the load balancer is already gone.
    if 'error' in load_bal:
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Load balancer {0} would be deleted.'.format(name)
        ret['changes'] = {'old': load_bal, 'new': {}}
        return ret

    if __salt__['azurearm_network.load_balancer_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} has been deleted.'.format(name)
        ret['changes'] = {'old': load_bal, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete load balancer {0}!'.format(name)
    return ret
def public_ip_address_present(name, resource_group, tags=None, sku=None, public_ip_allocation_method=None,
                              public_ip_address_version=None, dns_settings=None, idle_timeout_in_minutes=None,
                              connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a public IP address exists.

    :param name: Name of the public IP address.
    :param resource_group: The resource group assigned to the public IP address.
    :param dns_settings: An optional dictionary representing a valid PublicIPAddressDnsSettings
        object. Parameters include 'domain_name_label' and 'reverse_fqdn', which accept strings.
        The 'domain_name_label' is concatenated with the regionalized DNS zone to make up the
        fully qualified domain name associated with the public IP address; when given, an A DNS
        record is created for the public IP in the Microsoft Azure DNS system. The 'reverse_fqdn'
        is a user-visible, fully qualified domain name that resolves to this public IP address;
        when given, a PTR DNS record is created pointing from the IP address in the in-addr.arpa
        domain to the reverse FQDN.
    :param sku: The public IP address SKU, which can be 'Basic' or 'Standard'.
    :param public_ip_allocation_method: The public IP allocation method. Possible values are:
        'Static' and 'Dynamic'.
    :param public_ip_address_version: The public IP address version. Possible values are:
        'IPv4' and 'IPv6'.
    :param idle_timeout_in_minutes: An integer representing the idle timeout of the public IP
        address.
    :param tags: A dictionary of strings can be passed as tag metadata to the public IP address
        object.
    :param connection_auth: A dict with subscription and authentication parameters to be used in
        connecting to the Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure public IP exists:
            azurearm_network.public_ip_address_present:
                - name: pub_ip1
                - resource_group: group1
                - dns_settings:
                    domain_name_label: decisionlab-ext-test-label
                - sku: basic
                - public_ip_allocation_method: static
                - public_ip_address_version: ipv4
                - idle_timeout_in_minutes: 4
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    if sku:
        sku = {'name': sku.capitalize()}

    pub_ip = __salt__['azurearm_network.public_ip_address_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in pub_ip:
        # The address exists: diff each managed property against the
        # desired state and collect everything that would change.
        tag_diff = __utils__['dictdiffer.deep_diff'](pub_ip.get('tags', {}), tags or {})
        if tag_diff:
            ret['changes']['tags'] = tag_diff

        if dns_settings:
            if not isinstance(dns_settings, dict):
                ret['comment'] = 'DNS settings must be provided as a dictionary!'
                return ret
            # Any mismatched key flags the whole dns_settings object for replacement.
            if any(dns_settings[key] != pub_ip.get('dns_settings', {}).get(key) for key in dns_settings):
                ret['changes']['dns_settings'] = {
                    'old': pub_ip.get('dns_settings'),
                    'new': dns_settings
                }

        if sku:
            sku_diff = __utils__['dictdiffer.deep_diff'](pub_ip.get('sku', {}), sku)
            if sku_diff:
                ret['changes']['sku'] = sku_diff

        if public_ip_allocation_method:
            if public_ip_allocation_method.capitalize() != pub_ip.get('public_ip_allocation_method'):
                ret['changes']['public_ip_allocation_method'] = {
                    'old': pub_ip.get('public_ip_allocation_method'),
                    'new': public_ip_allocation_method
                }

        if public_ip_address_version:
            # Case-insensitive comparison: the API reports 'IPv4'/'IPv6'.
            if public_ip_address_version.lower() != pub_ip.get('public_ip_address_version', '').lower():
                ret['changes']['public_ip_address_version'] = {
                    'old': pub_ip.get('public_ip_address_version'),
                    'new': public_ip_address_version
                }

        if idle_timeout_in_minutes and (int(idle_timeout_in_minutes) != pub_ip.get('idle_timeout_in_minutes')):
            ret['changes']['idle_timeout_in_minutes'] = {
                'old': pub_ip.get('idle_timeout_in_minutes'),
                'new': idle_timeout_in_minutes
            }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Public IP address {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Public IP address {0} would be updated.'.format(name)
            return ret
    else:
        # The address does not exist yet; everything is new.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'tags': tags,
                'dns_settings': dns_settings,
                'sku': sku,
                'public_ip_allocation_method': public_ip_allocation_method,
                'public_ip_address_version': public_ip_address_version,
                'idle_timeout_in_minutes': idle_timeout_in_minutes,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Public IP address {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    pub_ip_kwargs = dict(kwargs)
    pub_ip_kwargs.update(connection_auth)

    pub_ip = __salt__['azurearm_network.public_ip_address_create_or_update'](
        name=name,
        resource_group=resource_group,
        sku=sku,
        tags=tags,
        dns_settings=dns_settings,
        public_ip_allocation_method=public_ip_allocation_method,
        public_ip_address_version=public_ip_address_version,
        idle_timeout_in_minutes=idle_timeout_in_minutes,
        **pub_ip_kwargs
    )

    if 'error' not in pub_ip:
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create public IP address {0}! ({1})'.format(name, pub_ip.get('error'))
    return ret
def public_ip_address_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a public IP address does not exist in the resource group.

    :param name: Name of the public IP address.
    :param resource_group: The resource group assigned to the public IP address.
    :param connection_auth: A dict with subscription and authentication parameters to be used in
        connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    pub_ip = __salt__['azurearm_network.public_ip_address_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # An error from the lookup means the address is already gone.
    if 'error' in pub_ip:
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Public IP address {0} would be deleted.'.format(name)
        ret['changes'] = {'old': pub_ip, 'new': {}}
        return ret

    if __salt__['azurearm_network.public_ip_address_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} has been deleted.'.format(name)
        ret['changes'] = {'old': pub_ip, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete public IP address {0}!'.format(name)
    return ret
def network_interface_present(name, ip_configurations, subnet, virtual_network, resource_group, tags=None,
                              virtual_machine=None, network_security_group=None, dns_settings=None, mac_address=None,
                              primary=None, enable_accelerated_networking=None, enable_ip_forwarding=None,
                              connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network interface exists.

    :param name: Name of the network interface.
    :param ip_configurations: A list of dictionaries representing valid
        NetworkInterfaceIPConfiguration objects. The 'name' key is required at minimum. At least
        one IP Configuration must be present.
    :param subnet: Name of the existing subnet assigned to the network interface.
    :param virtual_network: Name of the existing virtual network containing the subnet.
    :param resource_group: The resource group assigned to the virtual network.
    :param tags: A dictionary of strings can be passed as tag metadata to the network interface
        object.
    :param network_security_group: The name of the existing network security group to assign to
        the network interface.
    :param virtual_machine: The name of the existing virtual machine to assign to the network
        interface.
    :param dns_settings: An optional dictionary representing a valid NetworkInterfaceDnsSettings
        object. Valid parameters are:

        - ``dns_servers``: List of DNS server IP addresses. Use 'AzureProvidedDNS' to switch to
          Azure provided DNS resolution. 'AzureProvidedDNS' value cannot be combined with other
          IPs, it must be the only value in dns_servers collection.
        - ``internal_dns_name_label``: Relative DNS name for this NIC used for internal
          communications between VMs in the same virtual network.
        - ``internal_fqdn``: Fully qualified DNS name supporting internal communications between
          VMs in the same virtual network.
        - ``internal_domain_name_suffix``: Even if internal_dns_name_label is not specified, a DNS
          entry is created for the primary NIC of the VM. This DNS name can be constructed by
          concatenating the VM name with the value of internal_domain_name_suffix.
    :param mac_address: Optional string containing the MAC address of the network interface.
    :param primary: Optional boolean allowing the interface to be set as the primary network
        interface on a virtual machine with multiple interfaces attached.
    :param enable_accelerated_networking: Optional boolean indicating whether accelerated
        networking should be enabled for the interface.
    :param enable_ip_forwarding: Optional boolean indicating whether IP forwarding should be
        enabled for the interface.
    :param connection_auth: A dict with subscription and authentication parameters to be used in
        connecting to the Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure network interface exists:
            azurearm_network.network_interface_present:
                - name: iface1
                - subnet: vnet1_sn1
                - virtual_network: vnet1
                - resource_group: group1
                - ip_configurations:
                  - name: iface1_ipc1
                    public_ip_address: pub_ip2
                - dns_settings:
                    internal_dns_name_label: decisionlab-int-test-label
                - primary: True
                - enable_accelerated_networking: True
                - enable_ip_forwarding: False
                - network_security_group: nsg1
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure subnet exists
                  - azurearm_network: Ensure network security group exists
                  - azurearm_network: Ensure another public IP exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    iface = __salt__['azurearm_network.network_interface_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in iface:
        # The interface exists: diff each managed property against the
        # desired state and collect everything that would change.

        # tag changes
        tag_changes = __utils__['dictdiffer.deep_diff'](iface.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        # mac_address changes
        if mac_address and (mac_address != iface.get('mac_address')):
            ret['changes']['mac_address'] = {
                'old': iface.get('mac_address'),
                'new': mac_address
            }

        # primary changes
        if primary is not None:
            if primary != iface.get('primary', True):
                ret['changes']['primary'] = {
                    'old': iface.get('primary'),
                    'new': primary
                }

        # enable_accelerated_networking changes
        if enable_accelerated_networking is not None:
            if enable_accelerated_networking != iface.get('enable_accelerated_networking'):
                ret['changes']['enable_accelerated_networking'] = {
                    'old': iface.get('enable_accelerated_networking'),
                    'new': enable_accelerated_networking
                }

        # enable_ip_forwarding changes
        if enable_ip_forwarding is not None:
            if enable_ip_forwarding != iface.get('enable_ip_forwarding'):
                ret['changes']['enable_ip_forwarding'] = {
                    'old': iface.get('enable_ip_forwarding'),
                    'new': enable_ip_forwarding
                }

        # network_security_group changes: the existing NSG is referenced by
        # full resource ID, so extract the trailing name segment to compare.
        nsg_name = None
        if iface.get('network_security_group'):
            nsg_name = iface['network_security_group']['id'].split('/')[-1]
        if network_security_group and (network_security_group != nsg_name):
            ret['changes']['network_security_group'] = {
                'old': nsg_name,
                'new': network_security_group
            }

        # virtual_machine changes: also referenced by full resource ID.
        vm_name = None
        if iface.get('virtual_machine'):
            vm_name = iface['virtual_machine']['id'].split('/')[-1]
        if virtual_machine and (virtual_machine != vm_name):
            ret['changes']['virtual_machine'] = {
                'old': vm_name,
                'new': virtual_machine
            }

        # dns_settings changes
        if dns_settings:
            if not isinstance(dns_settings, dict):
                ret['comment'] = 'DNS settings must be provided as a dictionary!'
                return ret
            # Guard against a 'dns_settings' key holding None.
            existing_dns = iface.get('dns_settings') or {}
            for key in dns_settings:
                new_val = dns_settings[key]
                old_val = existing_dns.get(key, '')
                # Compare strings case-insensitively; other value types
                # (e.g. the 'dns_servers' list) are compared directly.
                # Previously .lower() was called unconditionally, which raised
                # AttributeError for non-string values such as dns_servers.
                if hasattr(new_val, 'lower') and hasattr(old_val, 'lower'):
                    differs = new_val.lower() != old_val.lower()
                else:
                    differs = new_val != old_val
                if differs:
                    ret['changes']['dns_settings'] = {
                        'old': iface.get('dns_settings'),
                        'new': dns_settings
                    }
                    break

        # ip_configurations changes
        comp_ret = __utils__['azurearm.compare_list_of_dicts'](
            iface.get('ip_configurations', []),
            ip_configurations,
            ['public_ip_address', 'subnet']
        )

        if comp_ret.get('comment'):
            ret['comment'] = '"ip_configurations" {0}'.format(comp_ret['comment'])
            return ret

        if comp_ret.get('changes'):
            ret['changes']['ip_configurations'] = comp_ret['changes']

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Network interface {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Network interface {0} would be updated.'.format(name)
            return ret

    else:
        # The interface does not exist yet; everything is new.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'ip_configurations': ip_configurations,
                'dns_settings': dns_settings,
                'network_security_group': network_security_group,
                'virtual_machine': virtual_machine,
                'enable_accelerated_networking': enable_accelerated_networking,
                'enable_ip_forwarding': enable_ip_forwarding,
                'mac_address': mac_address,
                'primary': primary,
                'tags': tags,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Network interface {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    iface_kwargs = kwargs.copy()
    iface_kwargs.update(connection_auth)

    iface = __salt__['azurearm_network.network_interface_create_or_update'](
        name=name,
        subnet=subnet,
        virtual_network=virtual_network,
        resource_group=resource_group,
        ip_configurations=ip_configurations,
        dns_settings=dns_settings,
        enable_accelerated_networking=enable_accelerated_networking,
        enable_ip_forwarding=enable_ip_forwarding,
        mac_address=mac_address,
        primary=primary,
        network_security_group=network_security_group,
        virtual_machine=virtual_machine,
        tags=tags,
        **iface_kwargs
    )

    if 'error' not in iface:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create network interface {0}! ({1})'.format(name, iface.get('error'))
    return ret
def network_interface_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network interface does not exist in the resource group.

    :param name: Name of the network interface.
    :param resource_group: The resource group assigned to the network interface.
    :param connection_auth: A dict with subscription and authentication parameters to be used in
        connecting to the Azure Resource Manager API.
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    iface = __salt__['azurearm_network.network_interface_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # An error from the lookup means the interface is already gone.
    if 'error' in iface:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} was not found.'.format(name)
        return ret

    elif __opts__['test']:
        ret['comment'] = 'Network interface {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {
            'old': iface,
            'new': {},
        }
        return ret

    deleted = __salt__['azurearm_network.network_interface_delete'](name, resource_group, **connection_auth)

    if deleted:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} has been deleted.'.format(name)
        ret['changes'] = {
            'old': iface,
            'new': {}
        }
        return ret

    # Fixed stray ')' that previously appeared in this failure message.
    ret['comment'] = 'Failed to delete network interface {0}!'.format(name)
    return ret
def route_table_present(name, resource_group, tags=None, routes=None, disable_bgp_route_propagation=None,
                        connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route table exists.

    :param name:
        Name of the route table.

    :param resource_group:
        The resource group assigned to the route table.

    :param routes:
        An optional list of dictionaries representing valid Route objects contained within a route table. See the
        documentation for the route_present state or route_create_or_update execution module for more information on
        required and optional parameters for routes. The routes are only managed if this parameter is present. When this
        parameter is absent, implemented routes will not be removed, and will merely become unmanaged.

    :param disable_bgp_route_propagation:
        An optional boolean parameter setting whether to disable the routes learned by BGP on the route table.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the route table object.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure route table exists:
            azurearm_network.route_table_present:
                - name: rt1
                - resource_group: group1
                - routes:
                  - name: rt1_route1
                    address_prefix: '0.0.0.0/0'
                    next_hop_type: internet
                  - name: rt1_route2
                    address_prefix: '192.168.0.0/16'
                    next_hop_type: vnetlocal
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    rt_tbl = __salt__['azurearm_network.route_table_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in rt_tbl:
        # tag changes
        tag_changes = __utils__['dictdiffer.deep_diff'](rt_tbl.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        # disable_bgp_route_propagation changes
        # Compare against None rather than truthiness so an explicit False is
        # honored and can flip an existing True value back to False.
        # pylint: disable=line-too-long
        if disable_bgp_route_propagation is not None and (disable_bgp_route_propagation != rt_tbl.get('disable_bgp_route_propagation')):
            ret['changes']['disable_bgp_route_propagation'] = {
                'old': rt_tbl.get('disable_bgp_route_propagation'),
                'new': disable_bgp_route_propagation
            }

        # routes changes -- only managed when the parameter is present
        if routes:
            comp_ret = __utils__['azurearm.compare_list_of_dicts'](rt_tbl.get('routes', []), routes)

            if comp_ret.get('comment'):
                ret['comment'] = '"routes" {0}'.format(comp_ret['comment'])
                return ret

            if comp_ret.get('changes'):
                ret['changes']['routes'] = comp_ret['changes']

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Route table {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Route table {0} would be updated.'.format(name)
            return ret

    else:
        # Route table does not exist -- the desired state is the change set.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'tags': tags,
                'routes': routes,
                'disable_bgp_route_propagation': disable_bgp_route_propagation,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Route table {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Merge auth into the kwargs passed through to the execution module.
    rt_tbl_kwargs = kwargs.copy()
    rt_tbl_kwargs.update(connection_auth)

    rt_tbl = __salt__['azurearm_network.route_table_create_or_update'](
        name=name,
        resource_group=resource_group,
        disable_bgp_route_propagation=disable_bgp_route_propagation,
        routes=routes,
        tags=tags,
        **rt_tbl_kwargs
    )

    if 'error' not in rt_tbl:
        ret['result'] = True
        ret['comment'] = 'Route table {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create route table {0}! ({1})'.format(name, rt_tbl.get('error'))
    return ret
def route_table_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route table does not exist in the resource group.

    :param name:
        Name of the route table.

    :param resource_group:
        The resource group assigned to the route table.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Credentials must be supplied explicitly (normally from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    rt_tbl = __salt__['azurearm_network.route_table_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # An 'error' key in the lookup result means the table is already gone.
    if 'error' in rt_tbl:
        ret['result'] = True
        ret['comment'] = 'Route table {0} was not found.'.format(name)
    elif __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Route table {0} would be deleted.'.format(name)
        ret['changes'] = {'old': rt_tbl, 'new': {}}
    elif __salt__['azurearm_network.route_table_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Route table {0} has been deleted.'.format(name)
        ret['changes'] = {'old': rt_tbl, 'new': {}}
    else:
        ret['comment'] = 'Failed to delete route table {0}!'.format(name)
    return ret
def route_present(name, address_prefix, next_hop_type, route_table, resource_group, next_hop_ip_address=None,
                  connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route exists within a route table.

    :param name:
        Name of the route.

    :param address_prefix:
        The destination CIDR to which the route applies.

    :param next_hop_type:
        The type of Azure hop the packet should be sent to. Possible values are: 'VirtualNetworkGateway', 'VnetLocal',
        'Internet', 'VirtualAppliance', and 'None'.

    :param next_hop_ip_address:
        The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop
        type is 'VirtualAppliance'.

    :param route_table:
        The name of the existing route table which will contain the route.

    :param resource_group:
        The resource group assigned to the route table.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure route exists:
            azurearm_network.route_present:
                - name: rt1_route2
                - route_table: rt1
                - resource_group: group1
                - address_prefix: '192.168.0.0/16'
                - next_hop_type: vnetlocal
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure route table exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    # Credentials must be supplied explicitly (normally from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Look up the existing route; an 'error' key means it does not exist yet.
    route = __salt__['azurearm_network.route_get'](
        name,
        route_table,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in route:
        # Route exists -- diff each managed property against the desired state.
        if address_prefix != route.get('address_prefix'):
            ret['changes']['address_prefix'] = {
                'old': route.get('address_prefix'),
                'new': address_prefix
            }

        # Case-insensitive compare since the hop type is matched ignoring case.
        if next_hop_type.lower() != route.get('next_hop_type', '').lower():
            ret['changes']['next_hop_type'] = {
                'old': route.get('next_hop_type'),
                'new': next_hop_type
            }

        # next_hop_ip_address is only compared for VirtualAppliance hops,
        # per the parameter documentation above.
        if next_hop_type.lower() == 'virtualappliance' and next_hop_ip_address != route.get('next_hop_ip_address'):
            ret['changes']['next_hop_ip_address'] = {
                'old': route.get('next_hop_ip_address'),
                'new': next_hop_ip_address
            }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Route {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Route {0} would be updated.'.format(name)
            return ret

    else:
        # Route does not exist -- the full desired state is the change set.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'address_prefix': address_prefix,
                'next_hop_type': next_hop_type,
                'next_hop_ip_address': next_hop_ip_address
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Route {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Merge auth into the kwargs passed through to the execution module.
    route_kwargs = kwargs.copy()
    route_kwargs.update(connection_auth)

    route = __salt__['azurearm_network.route_create_or_update'](
        name=name,
        route_table=route_table,
        resource_group=resource_group,
        address_prefix=address_prefix,
        next_hop_type=next_hop_type,
        next_hop_ip_address=next_hop_ip_address,
        **route_kwargs
    )

    if 'error' not in route:
        ret['result'] = True
        ret['comment'] = 'Route {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create route {0}! ({1})'.format(name, route.get('error'))
    return ret
def route_absent(name, route_table, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route does not exist within a route table.

    :param name:
        Name of the route.

    :param route_table:
        The name of the existing route table containing the route.

    :param resource_group:
        The resource group assigned to the route table.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Credentials must be supplied explicitly (normally from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    route = __salt__['azurearm_network.route_get'](
        name,
        route_table,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # An 'error' key in the lookup result means the route is already gone.
    if 'error' in route:
        ret['result'] = True
        ret['comment'] = 'Route {0} was not found.'.format(name)
    elif __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Route {0} would be deleted.'.format(name)
        ret['changes'] = {'old': route, 'new': {}}
    elif __salt__['azurearm_network.route_delete'](name, route_table, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Route {0} has been deleted.'.format(name)
        ret['changes'] = {'old': route, 'new': {}}
    else:
        ret['comment'] = 'Failed to delete route {0}!'.format(name)
    return ret
|
saltstack/salt
|
salt/states/azurearm_network.py
|
network_security_group_absent
|
python
|
def network_security_group_absent(name, resource_group, connection_auth=None):
'''
.. versionadded:: 2019.2.0
Ensure a network security group does not exist in the resource group.
:param name:
Name of the network security group.
:param resource_group:
The resource group assigned to the network security group.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
'''
ret = {
'name': name,
'result': False,
'comment': '',
'changes': {}
}
if not isinstance(connection_auth, dict):
ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
return ret
nsg = __salt__['azurearm_network.network_security_group_get'](
name,
resource_group,
azurearm_log_level='info',
**connection_auth
)
if 'error' in nsg:
ret['result'] = True
ret['comment'] = 'Network security group {0} was not found.'.format(name)
return ret
elif __opts__['test']:
ret['comment'] = 'Network security group {0} would be deleted.'.format(name)
ret['result'] = None
ret['changes'] = {
'old': nsg,
'new': {},
}
return ret
deleted = __salt__['azurearm_network.network_security_group_delete'](name, resource_group, **connection_auth)
if deleted:
ret['result'] = True
ret['comment'] = 'Network security group {0} has been deleted.'.format(name)
ret['changes'] = {
'old': nsg,
'new': {}
}
return ret
ret['comment'] = 'Failed to delete network security group {0}!'.format(name)
return ret
|
.. versionadded:: 2019.2.0
Ensure a network security group does not exist in the resource group.
:param name:
Name of the network security group.
:param resource_group:
The resource group assigned to the network security group.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/azurearm_network.py#L674-L734
| null |
# -*- coding: utf-8 -*-
'''
Azure (ARM) Network State Module
.. versionadded:: 2019.2.0
:maintainer: <devops@decisionlab.io>
:maturity: new
:depends:
* `azure <https://pypi.python.org/pypi/azure>`_ >= 2.0.0
* `azure-common <https://pypi.python.org/pypi/azure-common>`_ >= 1.1.8
* `azure-mgmt <https://pypi.python.org/pypi/azure-mgmt>`_ >= 1.0.0
* `azure-mgmt-compute <https://pypi.python.org/pypi/azure-mgmt-compute>`_ >= 1.0.0
* `azure-mgmt-network <https://pypi.python.org/pypi/azure-mgmt-network>`_ >= 1.7.1
* `azure-mgmt-resource <https://pypi.python.org/pypi/azure-mgmt-resource>`_ >= 1.1.0
* `azure-mgmt-storage <https://pypi.python.org/pypi/azure-mgmt-storage>`_ >= 1.0.0
* `azure-mgmt-web <https://pypi.python.org/pypi/azure-mgmt-web>`_ >= 0.32.0
* `azure-storage <https://pypi.python.org/pypi/azure-storage>`_ >= 0.34.3
* `msrestazure <https://pypi.python.org/pypi/msrestazure>`_ >= 0.4.21
:platform: linux
:configuration: This module requires Azure Resource Manager credentials to be passed as a dictionary of
keyword arguments to the ``connection_auth`` parameter in order to work properly. Since the authentication
parameters are sensitive, it's recommended to pass them to the states via pillar.
Required provider parameters:
if using username and password:
* ``subscription_id``
* ``username``
* ``password``
if using a service principal:
* ``subscription_id``
* ``tenant``
* ``client_id``
* ``secret``
Optional provider parameters:
**cloud_environment**: Used to point the cloud driver to different API endpoints, such as Azure GovCloud. Possible values:
* ``AZURE_PUBLIC_CLOUD`` (default)
* ``AZURE_CHINA_CLOUD``
* ``AZURE_US_GOV_CLOUD``
* ``AZURE_GERMAN_CLOUD``
Example Pillar for Azure Resource Manager authentication:
.. code-block:: yaml
azurearm:
user_pass_auth:
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
username: fletch
password: 123pass
mysubscription:
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
tenant: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
client_id: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
secret: XXXXXXXXXXXXXXXXXXXXXXXX
cloud_environment: AZURE_PUBLIC_CLOUD
Example states using Azure Resource Manager authentication:
.. code-block:: jinja
{% set profile = salt['pillar.get']('azurearm:mysubscription') %}
Ensure virtual network exists:
azurearm_network.virtual_network_present:
- name: my_vnet
- resource_group: my_rg
- address_prefixes:
- '10.0.0.0/8'
- '192.168.0.0/16'
- dns_servers:
- '8.8.8.8'
- tags:
how_awesome: very
contact_name: Elmer Fudd Gantry
- connection_auth: {{ profile }}
Ensure virtual network is absent:
azurearm_network.virtual_network_absent:
- name: other_vnet
- resource_group: my_rg
- connection_auth: {{ profile }}
'''
# Python libs
from __future__ import absolute_import
import logging
# Salt libs
try:
from salt.ext.six.moves import range as six_range
except ImportError:
six_range = range
__virtualname__ = 'azurearm_network'
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only make this state available if the azurearm_network module is available.
    '''
    if 'azurearm_network.check_ip_address_availability' in __salt__:
        return __virtualname__
    return False
def virtual_network_present(name, address_prefixes, resource_group, dns_servers=None,
                            tags=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a virtual network exists.

    :param name:
        Name of the virtual network.

    :param address_prefixes:
        A list of CIDR blocks which can be used by subnets within the virtual network.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param dns_servers:
        A list of DNS server addresses.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the virtual network object.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure virtual network exists:
            azurearm_network.virtual_network_present:
                - name: vnet1
                - resource_group: group1
                - address_prefixes:
                    - '10.0.0.0/8'
                    - '192.168.0.0/16'
                - dns_servers:
                    - '8.8.8.8'
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    # Credentials must be supplied explicitly (normally from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Look up the existing vnet; an 'error' key means it does not exist yet.
    vnet = __salt__['azurearm_network.virtual_network_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in vnet:
        # The vnet exists -- build a diff of each managed property.
        tag_changes = __utils__['dictdiffer.deep_diff'](vnet.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        # Symmetric difference: order is not significant for DNS servers.
        dns_changes = set(dns_servers or []).symmetric_difference(
            set(vnet.get('dhcp_options', {}).get('dns_servers', [])))
        if dns_changes:
            ret['changes']['dns_servers'] = {
                'old': vnet.get('dhcp_options', {}).get('dns_servers', []),
                'new': dns_servers,
            }

        # Same set-based comparison for the address space prefixes.
        addr_changes = set(address_prefixes or []).symmetric_difference(
            set(vnet.get('address_space', {}).get('address_prefixes', [])))
        if addr_changes:
            ret['changes']['address_space'] = {
                'address_prefixes': {
                    'old': vnet.get('address_space', {}).get('address_prefixes', []),
                    'new': address_prefixes,
                }
            }

        # Protection flags come in through **kwargs and default to False.
        if kwargs.get('enable_ddos_protection', False) != vnet.get('enable_ddos_protection'):
            ret['changes']['enable_ddos_protection'] = {
                'old': vnet.get('enable_ddos_protection'),
                'new': kwargs.get('enable_ddos_protection')
            }

        if kwargs.get('enable_vm_protection', False) != vnet.get('enable_vm_protection'):
            ret['changes']['enable_vm_protection'] = {
                'old': vnet.get('enable_vm_protection'),
                'new': kwargs.get('enable_vm_protection')
            }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Virtual network {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Virtual network {0} would be updated.'.format(name)
            return ret

    else:
        # Vnet does not exist -- the full desired state is the change set.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'resource_group': resource_group,
                'address_space': {'address_prefixes': address_prefixes},
                'dhcp_options': {'dns_servers': dns_servers},
                'enable_ddos_protection': kwargs.get('enable_ddos_protection', False),
                'enable_vm_protection': kwargs.get('enable_vm_protection', False),
                'tags': tags,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Virtual network {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Merge auth into the kwargs passed through to the execution module.
    vnet_kwargs = kwargs.copy()
    vnet_kwargs.update(connection_auth)

    vnet = __salt__['azurearm_network.virtual_network_create_or_update'](
        name=name,
        resource_group=resource_group,
        address_prefixes=address_prefixes,
        dns_servers=dns_servers,
        tags=tags,
        **vnet_kwargs
    )

    if 'error' not in vnet:
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create virtual network {0}! ({1})'.format(name, vnet.get('error'))
    return ret
def virtual_network_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a virtual network does not exist in the resource group.

    :param name:
        Name of the virtual network.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Credentials must be supplied explicitly (normally from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    vnet = __salt__['azurearm_network.virtual_network_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # An 'error' key in the lookup result means the vnet is already gone.
    if 'error' in vnet:
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} was not found.'.format(name)
    elif __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Virtual network {0} would be deleted.'.format(name)
        ret['changes'] = {'old': vnet, 'new': {}}
    elif __salt__['azurearm_network.virtual_network_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} has been deleted.'.format(name)
        ret['changes'] = {'old': vnet, 'new': {}}
    else:
        ret['comment'] = 'Failed to delete virtual network {0}!'.format(name)
    return ret
def subnet_present(name, address_prefix, virtual_network, resource_group,
                   security_group=None, route_table=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a subnet exists.

    :param name:
        Name of the subnet.

    :param address_prefix:
        A CIDR block used by the subnet within the virtual network.

    :param virtual_network:
        Name of the existing virtual network to contain the subnet.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param security_group:
        The name of the existing network security group to assign to the subnet.

    :param route_table:
        The name of the existing route table to assign to the subnet.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure subnet exists:
            azurearm_network.subnet_present:
                - name: vnet1_sn1
                - virtual_network: vnet1
                - resource_group: group1
                - address_prefix: '192.168.1.0/24'
                - security_group: nsg1
                - route_table: rt1
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure virtual network exists
                  - azurearm_network: Ensure network security group exists
                  - azurearm_network: Ensure route table exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    # Credentials must be supplied explicitly (normally from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Look up the existing subnet; an 'error' key means it does not exist yet.
    snet = __salt__['azurearm_network.subnet_get'](
        name,
        virtual_network,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in snet:
        # Subnet exists -- diff each managed property against the desired state.
        if address_prefix != snet.get('address_prefix'):
            ret['changes']['address_prefix'] = {
                'old': snet.get('address_prefix'),
                'new': address_prefix
            }

        # The API returns the NSG as a full resource ID; compare its last
        # path segment (the name) against the desired security_group.
        nsg_name = None
        if snet.get('network_security_group'):
            nsg_name = snet['network_security_group']['id'].split('/')[-1]

        if security_group and (security_group != nsg_name):
            ret['changes']['network_security_group'] = {
                'old': nsg_name,
                'new': security_group
            }

        # Same resource-ID-to-name comparison for the route table.
        rttbl_name = None
        if snet.get('route_table'):
            rttbl_name = snet['route_table']['id'].split('/')[-1]

        if route_table and (route_table != rttbl_name):
            ret['changes']['route_table'] = {
                'old': rttbl_name,
                'new': route_table
            }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Subnet {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Subnet {0} would be updated.'.format(name)
            return ret

    else:
        # Subnet does not exist -- the full desired state is the change set.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'address_prefix': address_prefix,
                'network_security_group': security_group,
                'route_table': route_table
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Subnet {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Merge auth into the kwargs passed through to the execution module.
    snet_kwargs = kwargs.copy()
    snet_kwargs.update(connection_auth)

    snet = __salt__['azurearm_network.subnet_create_or_update'](
        name=name,
        virtual_network=virtual_network,
        resource_group=resource_group,
        address_prefix=address_prefix,
        network_security_group=security_group,
        route_table=route_table,
        **snet_kwargs
    )

    if 'error' not in snet:
        ret['result'] = True
        ret['comment'] = 'Subnet {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create subnet {0}! ({1})'.format(name, snet.get('error'))
    return ret
def subnet_absent(name, virtual_network, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a subnet does not exist in the virtual network.

    :param name:
        Name of the subnet.

    :param virtual_network:
        Name of the existing virtual network containing the subnet.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Credentials must be supplied explicitly (normally from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    snet = __salt__['azurearm_network.subnet_get'](
        name,
        virtual_network,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # An 'error' key in the lookup result means the subnet is already gone.
    if 'error' in snet:
        ret['result'] = True
        ret['comment'] = 'Subnet {0} was not found.'.format(name)
    elif __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Subnet {0} would be deleted.'.format(name)
        ret['changes'] = {'old': snet, 'new': {}}
    elif __salt__['azurearm_network.subnet_delete'](name, virtual_network, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Subnet {0} has been deleted.'.format(name)
        ret['changes'] = {'old': snet, 'new': {}}
    else:
        ret['comment'] = 'Failed to delete subnet {0}!'.format(name)
    return ret
def network_security_group_present(name, resource_group, tags=None, security_rules=None, connection_auth=None,
                                   **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network security group exists.

    :param name:
        Name of the network security group.

    :param resource_group:
        The resource group assigned to the network security group.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the network security group object.

    :param security_rules: An optional list of dictionaries representing valid SecurityRule objects. See the
        documentation for the security_rule_present state or security_rule_create_or_update execution module
        for more information on required and optional parameters for security rules. The rules are only
        managed if this parameter is present. When this parameter is absent, implemented rules will not be removed,
        and will merely become unmanaged.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure network security group exists:
            azurearm_network.network_security_group_present:
                - name: nsg1
                - resource_group: group1
                - security_rules:
                  - name: nsg1_rule1
                    priority: 100
                    protocol: tcp
                    access: allow
                    direction: outbound
                    source_address_prefix: virtualnetwork
                    destination_address_prefix: internet
                    source_port_range: '*'
                    destination_port_range: '*'
                  - name: nsg1_rule2
                    priority: 101
                    protocol: tcp
                    access: allow
                    direction: inbound
                    source_address_prefix: internet
                    destination_address_prefix: virtualnetwork
                    source_port_range: '*'
                    destination_port_ranges:
                      - '80'
                      - '443'
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    # Credentials must be supplied explicitly (normally from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Look up the existing NSG; an 'error' key means it does not exist yet.
    nsg = __salt__['azurearm_network.network_security_group_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in nsg:
        # NSG exists -- diff tags and (optionally) the managed rule list.
        tag_changes = __utils__['dictdiffer.deep_diff'](nsg.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        # Rules are only managed when the parameter is present; an absent
        # parameter leaves existing rules untouched (unmanaged).
        if security_rules:
            comp_ret = __utils__['azurearm.compare_list_of_dicts'](nsg.get('security_rules', []), security_rules)

            if comp_ret.get('comment'):
                ret['comment'] = '"security_rules" {0}'.format(comp_ret['comment'])
                return ret

            if comp_ret.get('changes'):
                ret['changes']['security_rules'] = comp_ret['changes']

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Network security group {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Network security group {0} would be updated.'.format(name)
            return ret

    else:
        # NSG does not exist -- the full desired state is the change set.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'resource_group': resource_group,
                'tags': tags,
                'security_rules': security_rules,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Network security group {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    # Merge auth into the kwargs passed through to the execution module.
    nsg_kwargs = kwargs.copy()
    nsg_kwargs.update(connection_auth)

    nsg = __salt__['azurearm_network.network_security_group_create_or_update'](
        name=name,
        resource_group=resource_group,
        tags=tags,
        security_rules=security_rules,
        **nsg_kwargs
    )

    if 'error' not in nsg:
        ret['result'] = True
        ret['comment'] = 'Network security group {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create network security group {0}! ({1})'.format(name, nsg.get('error'))
    return ret
def security_rule_present(name, access, direction, priority, protocol, security_group, resource_group,
                          destination_address_prefix=None, destination_port_range=None, source_address_prefix=None,
                          source_port_range=None, description=None, destination_address_prefixes=None,
                          destination_port_ranges=None, source_address_prefixes=None, source_port_ranges=None,
                          connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a security rule exists.

    :param name:
        Name of the security rule.

    :param access:
        'allow' or 'deny'

    :param direction:
        'inbound' or 'outbound'

    :param priority:
        Integer between 100 and 4096 used for ordering rule application.

    :param protocol:
        'tcp', 'udp', or '*'

    :param security_group:
        The name of the existing network security group to contain the security rule.

    :param resource_group:
        The resource group assigned to the network security group.

    :param description:
        Optional description of the security rule.

    :param destination_address_prefix:
        The CIDR or destination IP range. Asterix '*' can also be used to match all destination IPs.
        Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
        If this is an ingress rule, specifies where network traffic originates from.

    :param destination_port_range:
        The destination port or range. Integer or range between 0 and 65535. Asterix '*'
        can also be used to match all ports.

    :param source_address_prefix:
        The CIDR or source IP range. Asterix '*' can also be used to match all source IPs.
        Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
        If this is an ingress rule, specifies where network traffic originates from.

    :param source_port_range:
        The source port or range. Integer or range between 0 and 65535. Asterix '*'
        can also be used to match all ports.

    :param destination_address_prefixes:
        A list of destination_address_prefix values. This parameter overrides destination_address_prefix
        and will cause any value entered there to be ignored.

    :param destination_port_ranges:
        A list of destination_port_range values. This parameter overrides destination_port_range
        and will cause any value entered there to be ignored.

    :param source_address_prefixes:
        A list of source_address_prefix values. This parameter overrides source_address_prefix
        and will cause any value entered there to be ignored.

    :param source_port_ranges:
        A list of source_port_range values. This parameter overrides source_port_range
        and will cause any value entered there to be ignored.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure security rule exists:
            azurearm_network.security_rule_present:
                - name: nsg1_rule2
                - security_group: nsg1
                - resource_group: group1
                - priority: 101
                - protocol: tcp
                - access: allow
                - direction: inbound
                - source_address_prefix: internet
                - destination_address_prefix: virtualnetwork
                - source_port_range: '*'
                - destination_port_ranges:
                  - '80'
                  - '443'
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure network security group exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Validate the mutually-exclusive plural/singular parameter pairs. The values
    # are captured directly in the tuples rather than looked up via eval(), and
    # the singular parameters are cleared with plain assignments rather than
    # exec(). The previous exec()-based implementation could not rebind function
    # locals on Python 3, so the documented "plural overrides singular" behavior
    # silently failed there.
    exclusive_params = [
        ('source_port_ranges', source_port_ranges, 'source_port_range', source_port_range),
        ('source_address_prefixes', source_address_prefixes, 'source_address_prefix', source_address_prefix),
        ('destination_port_ranges', destination_port_ranges, 'destination_port_range', destination_port_range),
        ('destination_address_prefixes', destination_address_prefixes,
         'destination_address_prefix', destination_address_prefix),
    ]

    for plural_name, plural_value, singular_name, singular_value in exclusive_params:
        if not plural_value and not singular_value:
            ret['comment'] = 'Either the {0} or {1} parameter must be provided!'.format(plural_name, singular_name)
            return ret
        if plural_value and not isinstance(plural_value, list):
            ret['comment'] = 'The {0} parameter must be a list!'.format(plural_name)
            return ret

    # Each plural parameter overrides its singular counterpart.
    if source_port_ranges:
        source_port_range = None
    if source_address_prefixes:
        source_address_prefix = None
    if destination_port_ranges:
        destination_port_range = None
    if destination_address_prefixes:
        destination_address_prefix = None

    rule = __salt__['azurearm_network.security_rule_get'](
        name,
        security_group,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in rule:
        # access changes
        if access.capitalize() != rule.get('access'):
            ret['changes']['access'] = {
                'old': rule.get('access'),
                'new': access
            }

        # description changes
        if description != rule.get('description'):
            ret['changes']['description'] = {
                'old': rule.get('description'),
                'new': description
            }

        # direction changes
        if direction.capitalize() != rule.get('direction'):
            ret['changes']['direction'] = {
                'old': rule.get('direction'),
                'new': direction
            }

        # priority changes
        if int(priority) != rule.get('priority'):
            ret['changes']['priority'] = {
                'old': rule.get('priority'),
                'new': priority
            }

        # protocol changes
        if protocol.lower() != rule.get('protocol', '').lower():
            ret['changes']['protocol'] = {
                'old': rule.get('protocol'),
                'new': protocol
            }

        # destination_port_range changes
        if destination_port_range != rule.get('destination_port_range'):
            ret['changes']['destination_port_range'] = {
                'old': rule.get('destination_port_range'),
                'new': destination_port_range
            }

        # source_port_range changes
        if source_port_range != rule.get('source_port_range'):
            ret['changes']['source_port_range'] = {
                'old': rule.get('source_port_range'),
                'new': source_port_range
            }

        # destination_port_ranges changes
        if sorted(destination_port_ranges or []) != sorted(rule.get('destination_port_ranges', [])):
            ret['changes']['destination_port_ranges'] = {
                'old': rule.get('destination_port_ranges'),
                'new': destination_port_ranges
            }

        # source_port_ranges changes
        if sorted(source_port_ranges or []) != sorted(rule.get('source_port_ranges', [])):
            ret['changes']['source_port_ranges'] = {
                'old': rule.get('source_port_ranges'),
                'new': source_port_ranges
            }

        # destination_address_prefix changes
        if (destination_address_prefix or '').lower() != rule.get('destination_address_prefix', '').lower():
            ret['changes']['destination_address_prefix'] = {
                'old': rule.get('destination_address_prefix'),
                'new': destination_address_prefix
            }

        # source_address_prefix changes
        if (source_address_prefix or '').lower() != rule.get('source_address_prefix', '').lower():
            ret['changes']['source_address_prefix'] = {
                'old': rule.get('source_address_prefix'),
                'new': source_address_prefix
            }

        # destination_address_prefixes changes
        if sorted(destination_address_prefixes or []) != sorted(rule.get('destination_address_prefixes', [])):
            if len(destination_address_prefixes or []) != len(rule.get('destination_address_prefixes', [])):
                ret['changes']['destination_address_prefixes'] = {
                    'old': rule.get('destination_address_prefixes'),
                    'new': destination_address_prefixes
                }
            else:
                # Same number of prefixes: compare the sorted lists pairwise,
                # case-insensitively, and record a change on the first mismatch.
                local_dst_addrs = sorted(destination_address_prefixes)
                remote_dst_addrs = sorted(rule.get('destination_address_prefixes'))
                for local_addr, remote_addr in zip(local_dst_addrs, remote_dst_addrs):
                    if local_addr.lower() != remote_addr.lower():
                        ret['changes']['destination_address_prefixes'] = {
                            'old': rule.get('destination_address_prefixes'),
                            'new': destination_address_prefixes
                        }
                        break

        # source_address_prefixes changes
        if sorted(source_address_prefixes or []) != sorted(rule.get('source_address_prefixes', [])):
            if len(source_address_prefixes or []) != len(rule.get('source_address_prefixes', [])):
                ret['changes']['source_address_prefixes'] = {
                    'old': rule.get('source_address_prefixes'),
                    'new': source_address_prefixes
                }
            else:
                # Same number of prefixes: compare the sorted lists pairwise,
                # case-insensitively, and record a change on the first mismatch.
                local_src_addrs = sorted(source_address_prefixes)
                remote_src_addrs = sorted(rule.get('source_address_prefixes'))
                for local_addr, remote_addr in zip(local_src_addrs, remote_src_addrs):
                    if local_addr.lower() != remote_addr.lower():
                        ret['changes']['source_address_prefixes'] = {
                            'old': rule.get('source_address_prefixes'),
                            'new': source_address_prefixes
                        }
                        break

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Security rule {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Security rule {0} would be updated.'.format(name)
            return ret

    else:
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'access': access,
                'description': description,
                'direction': direction,
                'priority': priority,
                'protocol': protocol,
                'destination_address_prefix': destination_address_prefix,
                'destination_address_prefixes': destination_address_prefixes,
                'destination_port_range': destination_port_range,
                'destination_port_ranges': destination_port_ranges,
                'source_address_prefix': source_address_prefix,
                'source_address_prefixes': source_address_prefixes,
                'source_port_range': source_port_range,
                'source_port_ranges': source_port_ranges,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Security rule {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    rule_kwargs = kwargs.copy()
    rule_kwargs.update(connection_auth)

    rule = __salt__['azurearm_network.security_rule_create_or_update'](
        name=name,
        access=access,
        description=description,
        direction=direction,
        priority=priority,
        protocol=protocol,
        security_group=security_group,
        resource_group=resource_group,
        destination_address_prefix=destination_address_prefix,
        destination_address_prefixes=destination_address_prefixes,
        destination_port_range=destination_port_range,
        destination_port_ranges=destination_port_ranges,
        source_address_prefix=source_address_prefix,
        source_address_prefixes=source_address_prefixes,
        source_port_range=source_port_range,
        source_port_ranges=source_port_ranges,
        **rule_kwargs
    )

    if 'error' not in rule:
        ret['result'] = True
        ret['comment'] = 'Security rule {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create security rule {0}! ({1})'.format(name, rule.get('error'))
    return ret
def security_rule_absent(name, security_group, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a security rule does not exist in the network security group.

    :param name:
        Name of the security rule.

    :param security_group:
        The network security group containing the security rule.

    :param resource_group:
        The resource group assigned to the network security group.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    rule = __salt__['azurearm_network.security_rule_get'](
        name,
        security_group,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # Already gone: nothing to do.
    if 'error' in rule:
        ret['result'] = True
        ret['comment'] = 'Security rule {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Security rule {0} would be deleted.'.format(name)
        ret['changes'] = {'old': rule, 'new': {}}
        return ret

    if __salt__['azurearm_network.security_rule_delete'](name, security_group, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Security rule {0} has been deleted.'.format(name)
        ret['changes'] = {'old': rule, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete security rule {0}!'.format(name)
    return ret
def load_balancer_present(name, resource_group, sku=None, frontend_ip_configurations=None, backend_address_pools=None,
                          load_balancing_rules=None, probes=None, inbound_nat_rules=None, inbound_nat_pools=None,
                          outbound_nat_rules=None, tags=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a load balancer exists.

    :param name:
        Name of the load balancer.

    :param resource_group:
        The resource group assigned to the load balancer.

    :param sku:
        The load balancer SKU, which can be 'Basic' or 'Standard'.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the load balancer object.

    :param frontend_ip_configurations:
        An optional list of dictionaries representing valid FrontendIPConfiguration objects. A frontend IP
        configuration can be either private (via ``private_ip_address`` and ``subnet``) or public (via a
        ``public_ip_address`` reference). Supported keys: ``name``, ``private_ip_address``,
        ``private_ip_allocation_method`` ('Static' or 'Dynamic'), ``subnet``, ``public_ip_address``.

    :param backend_address_pools:
        An optional list of dictionaries representing valid BackendAddressPool objects. Only the ``name``
        key is settable; all other parameters are read-only references from other linked objects.

    :param probes:
        An optional list of dictionaries representing valid Probe objects. Supported keys: ``name``,
        ``protocol`` ('Http' or 'Tcp'), ``port``, ``interval_in_seconds``, ``number_of_probes``, and
        ``request_path`` (required when the protocol is 'Http', otherwise not allowed).

    :param load_balancing_rules:
        An optional list of dictionaries representing valid LoadBalancingRule objects. Supported keys:
        ``name``, ``load_distribution``, ``frontend_port``, ``backend_port``, ``idle_timeout_in_minutes``,
        ``enable_floating_ip``, ``disable_outbound_snat``, ``frontend_ip_configuration``,
        ``backend_address_pool``, ``probe``.

    :param inbound_nat_rules:
        An optional list of dictionaries representing valid InboundNatRule objects. Defining inbound NAT
        rules is mutually exclusive with defining an inbound NAT pool. Supported keys: ``name``,
        ``frontend_ip_configuration``, ``protocol`` ('Udp', 'Tcp' or 'All'), ``frontend_port``,
        ``backend_port``, ``idle_timeout_in_minutes``, ``enable_floating_ip``.

    :param inbound_nat_pools:
        An optional list of dictionaries representing valid InboundNatPool objects. Defining an inbound NAT
        pool is mutually exclusive with defining inbound NAT rules. Supported keys: ``name``,
        ``frontend_ip_configuration``, ``protocol`` ('Udp', 'Tcp' or 'All'), ``frontend_port_range_start``,
        ``frontend_port_range_end``, ``backend_port``.

    :param outbound_nat_rules:
        An optional list of dictionaries representing valid OutboundNatRule objects. Supported keys:
        ``name``, ``frontend_ip_configuration``, ``backend_address_pool``, ``allocated_outbound_ports``.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure load balancer exists:
            azurearm_network.load_balancer_present:
                - name: lb1
                - resource_group: group1
                - location: eastus
                - frontend_ip_configurations:
                  - name: lb1_feip1
                    public_ip_address: pub_ip1
                - backend_address_pools:
                  - name: lb1_bepool1
                - probes:
                  - name: lb1_webprobe1
                    protocol: tcp
                    port: 80
                    interval_in_seconds: 5
                    number_of_probes: 2
                - load_balancing_rules:
                  - name: lb1_webprobe1
                    protocol: tcp
                    frontend_port: 80
                    backend_port: 80
                    idle_timeout_in_minutes: 4
                    frontend_ip_configuration: lb1_feip1
                    backend_address_pool: lb1_bepool1
                    probe: lb1_webprobe1
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
                  - azurearm_network: Ensure public IP exists
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # The API expects the SKU as a dict with a capitalized name.
    if sku:
        sku = {'name': sku.capitalize()}

    load_bal = __salt__['azurearm_network.load_balancer_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in load_bal:
        # Existing object: diff each configurable piece against the desired state.
        tag_diff = __utils__['dictdiffer.deep_diff'](load_bal.get('tags', {}), tags or {})
        if tag_diff:
            ret['changes']['tags'] = tag_diff

        if sku:
            sku_diff = __utils__['dictdiffer.deep_diff'](load_bal.get('sku', {}), sku)
            if sku_diff:
                ret['changes']['sku'] = sku_diff

        # (parameter name, desired value, keys whose resource IDs are compared by name)
        list_params = [
            ('frontend_ip_configurations', frontend_ip_configurations, ['public_ip_address', 'subnet']),
            ('backend_address_pools', backend_address_pools, None),
            ('probes', probes, None),
            ('load_balancing_rules', load_balancing_rules,
             ['frontend_ip_configuration', 'backend_address_pool', 'probe']),
            ('inbound_nat_rules', inbound_nat_rules, ['frontend_ip_configuration']),
            ('inbound_nat_pools', inbound_nat_pools, ['frontend_ip_configuration']),
            ('outbound_nat_rules', outbound_nat_rules, ['frontend_ip_configuration']),
        ]

        for param_name, desired, id_keys in list_params:
            if not desired:
                continue
            if id_keys is None:
                comp_ret = __utils__['azurearm.compare_list_of_dicts'](load_bal.get(param_name, []), desired)
            else:
                comp_ret = __utils__['azurearm.compare_list_of_dicts'](load_bal.get(param_name, []), desired, id_keys)
            if comp_ret.get('comment'):
                ret['comment'] = '"{0}" {1}'.format(param_name, comp_ret['comment'])
                return ret
            if comp_ret.get('changes'):
                ret['changes'][param_name] = comp_ret['changes']

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Load balancer {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Load balancer {0} would be updated.'.format(name)
            return ret

    else:
        # New object: everything is a change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'sku': sku,
                'tags': tags,
                'frontend_ip_configurations': frontend_ip_configurations,
                'backend_address_pools': backend_address_pools,
                'load_balancing_rules': load_balancing_rules,
                'probes': probes,
                'inbound_nat_rules': inbound_nat_rules,
                'inbound_nat_pools': inbound_nat_pools,
                'outbound_nat_rules': outbound_nat_rules,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Load balancer {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    lb_kwargs = kwargs.copy()
    lb_kwargs.update(connection_auth)

    load_bal = __salt__['azurearm_network.load_balancer_create_or_update'](
        name=name,
        resource_group=resource_group,
        sku=sku,
        tags=tags,
        frontend_ip_configurations=frontend_ip_configurations,
        backend_address_pools=backend_address_pools,
        load_balancing_rules=load_balancing_rules,
        probes=probes,
        inbound_nat_rules=inbound_nat_rules,
        inbound_nat_pools=inbound_nat_pools,
        outbound_nat_rules=outbound_nat_rules,
        **lb_kwargs
    )

    if 'error' not in load_bal:
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create load balancer {0}! ({1})'.format(name, load_bal.get('error'))
    return ret
def load_balancer_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a load balancer does not exist in the resource group.

    :param name:
        Name of the load balancer.

    :param resource_group:
        The resource group assigned to the load balancer.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    load_bal = __salt__['azurearm_network.load_balancer_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # Already gone: nothing to do.
    if 'error' in load_bal:
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Load balancer {0} would be deleted.'.format(name)
        ret['changes'] = {'old': load_bal, 'new': {}}
        return ret

    if __salt__['azurearm_network.load_balancer_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} has been deleted.'.format(name)
        ret['changes'] = {'old': load_bal, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete load balancer {0}!'.format(name)
    return ret
def public_ip_address_present(name, resource_group, tags=None, sku=None, public_ip_allocation_method=None,
                              public_ip_address_version=None, dns_settings=None, idle_timeout_in_minutes=None,
                              connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a public IP address exists.

    :param name:
        Name of the public IP address.

    :param resource_group:
        The resource group assigned to the public IP address.

    :param dns_settings:
        An optional dictionary representing a valid PublicIPAddressDnsSettings object. Supported keys are
        ``domain_name_label`` and ``reverse_fqdn``, both strings. The domain name label is concatenated
        with the regionalized DNS zone to form the FQDN of the public IP (an A record is created in the
        Microsoft Azure DNS system when set). The reverse FQDN creates a PTR record pointing from the
        in-addr.arpa domain back to the reverse FQDN.

    :param sku:
        The public IP address SKU, which can be 'Basic' or 'Standard'.

    :param public_ip_allocation_method:
        The public IP allocation method. Possible values are: 'Static' and 'Dynamic'.

    :param public_ip_address_version:
        The public IP address version. Possible values are: 'IPv4' and 'IPv6'.

    :param idle_timeout_in_minutes:
        An integer representing the idle timeout of the public IP address.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the public IP address object.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure public IP exists:
            azurearm_network.public_ip_address_present:
                - name: pub_ip1
                - resource_group: group1
                - dns_settings:
                    domain_name_label: decisionlab-ext-test-label
                - sku: basic
                - public_ip_allocation_method: static
                - public_ip_address_version: ipv4
                - idle_timeout_in_minutes: 4
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # The API expects the SKU as a dict with a capitalized name.
    if sku:
        sku = {'name': sku.capitalize()}

    pub_ip = __salt__['azurearm_network.public_ip_address_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in pub_ip:
        # Existing object: diff each configurable piece against the desired state.
        tag_diff = __utils__['dictdiffer.deep_diff'](pub_ip.get('tags', {}), tags or {})
        if tag_diff:
            ret['changes']['tags'] = tag_diff

        if dns_settings:
            if not isinstance(dns_settings, dict):
                ret['comment'] = 'DNS settings must be provided as a dictionary!'
                return ret
            existing_dns = pub_ip.get('dns_settings', {})
            # Only the keys the caller supplied are compared.
            if any(dns_settings[key] != existing_dns.get(key) for key in dns_settings):
                ret['changes']['dns_settings'] = {
                    'old': pub_ip.get('dns_settings'),
                    'new': dns_settings
                }

        if sku:
            sku_diff = __utils__['dictdiffer.deep_diff'](pub_ip.get('sku', {}), sku)
            if sku_diff:
                ret['changes']['sku'] = sku_diff

        if public_ip_allocation_method and public_ip_allocation_method.capitalize() != pub_ip.get('public_ip_allocation_method'):
            ret['changes']['public_ip_allocation_method'] = {
                'old': pub_ip.get('public_ip_allocation_method'),
                'new': public_ip_allocation_method
            }

        if public_ip_address_version and public_ip_address_version.lower() != pub_ip.get('public_ip_address_version', '').lower():
            ret['changes']['public_ip_address_version'] = {
                'old': pub_ip.get('public_ip_address_version'),
                'new': public_ip_address_version
            }

        if idle_timeout_in_minutes and (int(idle_timeout_in_minutes) != pub_ip.get('idle_timeout_in_minutes')):
            ret['changes']['idle_timeout_in_minutes'] = {
                'old': pub_ip.get('idle_timeout_in_minutes'),
                'new': idle_timeout_in_minutes
            }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Public IP address {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Public IP address {0} would be updated.'.format(name)
            return ret

    else:
        # New object: everything is a change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'tags': tags,
                'dns_settings': dns_settings,
                'sku': sku,
                'public_ip_allocation_method': public_ip_allocation_method,
                'public_ip_address_version': public_ip_address_version,
                'idle_timeout_in_minutes': idle_timeout_in_minutes,
            }
        }

    if __opts__['test']:
        ret['comment'] = 'Public IP address {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    pub_ip_kwargs = kwargs.copy()
    pub_ip_kwargs.update(connection_auth)

    pub_ip = __salt__['azurearm_network.public_ip_address_create_or_update'](
        name=name,
        resource_group=resource_group,
        sku=sku,
        tags=tags,
        dns_settings=dns_settings,
        public_ip_allocation_method=public_ip_allocation_method,
        public_ip_address_version=public_ip_address_version,
        idle_timeout_in_minutes=idle_timeout_in_minutes,
        **pub_ip_kwargs
    )

    if 'error' not in pub_ip:
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create public IP address {0}! ({1})'.format(name, pub_ip.get('error'))
    return ret
def public_ip_address_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a public IP address does not exist in the resource group.

    :param name: Name of the public IP address.
    :param resource_group: The resource group assigned to the public IP address.
    :param connection_auth: A dict with subscription and authentication parameters to be used
        in connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Credentials are mandatory and must arrive as a dictionary.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.public_ip_address_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # Already absent: success with no changes.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} was not found.'.format(name)
        return ret

    # Dry run: report the pending deletion without performing it.
    if __opts__['test']:
        ret['comment'] = 'Public IP address {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.public_ip_address_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete public IP address {0}!'.format(name)
    return ret
def network_interface_present(name, ip_configurations, subnet, virtual_network, resource_group, tags=None,
                              virtual_machine=None, network_security_group=None, dns_settings=None, mac_address=None,
                              primary=None, enable_accelerated_networking=None, enable_ip_forwarding=None,
                              connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network interface exists.

    :param name:
        Name of the network interface.

    :param ip_configurations:
        A list of dictionaries representing valid NetworkInterfaceIPConfiguration objects. The 'name' key is required at
        minimum. At least one IP Configuration must be present.

    :param subnet:
        Name of the existing subnet assigned to the network interface.

    :param virtual_network:
        Name of the existing virtual network containing the subnet.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the network interface object.

    :param network_security_group:
        The name of the existing network security group to assign to the network interface.

    :param virtual_machine:
        The name of the existing virtual machine to assign to the network interface.

    :param dns_settings:
        An optional dictionary representing a valid NetworkInterfaceDnsSettings object. Valid parameters are:

        - ``dns_servers``: List of DNS server IP addresses. Use 'AzureProvidedDNS' to switch to Azure provided DNS
          resolution. 'AzureProvidedDNS' value cannot be combined with other IPs, it must be the only value in
          dns_servers collection.
        - ``internal_dns_name_label``: Relative DNS name for this NIC used for internal communications between VMs in
          the same virtual network.
        - ``internal_fqdn``: Fully qualified DNS name supporting internal communications between VMs in the same virtual
          network.
        - ``internal_domain_name_suffix``: Even if internal_dns_name_label is not specified, a DNS entry is created for
          the primary NIC of the VM. This DNS name can be constructed by concatenating the VM name with the value of
          internal_domain_name_suffix.

    :param mac_address:
        Optional string containing the MAC address of the network interface.

    :param primary:
        Optional boolean allowing the interface to be set as the primary network interface on a virtual machine
        with multiple interfaces attached.

    :param enable_accelerated_networking:
        Optional boolean indicating whether accelerated networking should be enabled for the interface.

    :param enable_ip_forwarding:
        Optional boolean indicating whether IP forwarding should be enabled for the interface.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure network interface exists:
            azurearm_network.network_interface_present:
                - name: iface1
                - subnet: vnet1_sn1
                - virtual_network: vnet1
                - resource_group: group1
                - ip_configurations:
                  - name: iface1_ipc1
                    public_ip_address: pub_ip2
                - dns_settings:
                    internal_dns_name_label: decisionlab-int-test-label
                - primary: True
                - enable_accelerated_networking: True
                - enable_ip_forwarding: False
                - network_security_group: nsg1
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure subnet exists
                  - azurearm_network: Ensure network security group exists
                  - azurearm_network: Ensure another public IP exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }
    # connection_auth must be supplied (normally from pillar) as a dict.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    iface = __salt__['azurearm_network.network_interface_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )
    # Interface already exists: diff each managed property against the
    # desired state, accumulating differences into ret['changes'].
    if 'error' not in iface:
        # tag changes
        tag_changes = __utils__['dictdiffer.deep_diff'](iface.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes
        # mac_address changes
        if mac_address and (mac_address != iface.get('mac_address')):
            ret['changes']['mac_address'] = {
                'old': iface.get('mac_address'),
                'new': mac_address
            }
        # primary changes (only diffed when explicitly requested; the remote
        # value defaults to True when the key is missing)
        if primary is not None:
            if primary != iface.get('primary', True):
                ret['changes']['primary'] = {
                    'old': iface.get('primary'),
                    'new': primary
                }
        # enable_accelerated_networking changes
        if enable_accelerated_networking is not None:
            if enable_accelerated_networking != iface.get('enable_accelerated_networking'):
                ret['changes']['enable_accelerated_networking'] = {
                    'old': iface.get('enable_accelerated_networking'),
                    'new': enable_accelerated_networking
                }
        # enable_ip_forwarding changes
        if enable_ip_forwarding is not None:
            if enable_ip_forwarding != iface.get('enable_ip_forwarding'):
                ret['changes']['enable_ip_forwarding'] = {
                    'old': iface.get('enable_ip_forwarding'),
                    'new': enable_ip_forwarding
                }
        # network_security_group changes -- the NSG name is the final segment
        # of the resource ID returned by the API.
        nsg_name = None
        if iface.get('network_security_group'):
            nsg_name = iface['network_security_group']['id'].split('/')[-1]
        if network_security_group and (network_security_group != nsg_name):
            ret['changes']['network_security_group'] = {
                'old': nsg_name,
                'new': network_security_group
            }
        # virtual_machine changes -- same resource-ID trick as above.
        vm_name = None
        if iface.get('virtual_machine'):
            vm_name = iface['virtual_machine']['id'].split('/')[-1]
        if virtual_machine and (virtual_machine != vm_name):
            ret['changes']['virtual_machine'] = {
                'old': vm_name,
                'new': virtual_machine
            }
        # dns_settings changes -- compared case-insensitively, key by key.
        # NOTE(review): this assumes every dns_settings value is a string
        # (.lower() is called on it); list values such as dns_servers would
        # raise here -- confirm against the execution module.
        if dns_settings:
            if not isinstance(dns_settings, dict):
                ret['comment'] = 'DNS settings must be provided as a dictionary!'
                return ret
            for key in dns_settings:
                if dns_settings[key].lower() != iface.get('dns_settings', {}).get(key, '').lower():
                    ret['changes']['dns_settings'] = {
                        'old': iface.get('dns_settings'),
                        'new': dns_settings
                    }
                    break
        # ip_configurations changes -- delegated to the shared list-of-dicts
        # comparator; 'public_ip_address' and 'subnet' are matched by name.
        comp_ret = __utils__['azurearm.compare_list_of_dicts'](
            iface.get('ip_configurations', []),
            ip_configurations,
            ['public_ip_address', 'subnet']
        )
        if comp_ret.get('comment'):
            ret['comment'] = '"ip_configurations" {0}'.format(comp_ret['comment'])
            return ret
        if comp_ret.get('changes'):
            ret['changes']['ip_configurations'] = comp_ret['changes']
        # No differences: the state is already satisfied.
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Network interface {0} is already present.'.format(name)
            return ret
        # Test mode: report the pending update without applying it.
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Network interface {0} would be updated.'.format(name)
            return ret
    else:
        # Interface not found: everything below is a creation.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'ip_configurations': ip_configurations,
                'dns_settings': dns_settings,
                'network_security_group': network_security_group,
                'virtual_machine': virtual_machine,
                'enable_accelerated_networking': enable_accelerated_networking,
                'enable_ip_forwarding': enable_ip_forwarding,
                'mac_address': mac_address,
                'primary': primary,
                'tags': tags,
            }
        }
    # Test mode for the creation path (the update path returned above).
    if __opts__['test']:
        ret['comment'] = 'Network interface {0} would be created.'.format(name)
        ret['result'] = None
        return ret
    # Pass any extra kwargs plus credentials through to the execution module;
    # create_or_update covers both the create and update paths.
    iface_kwargs = kwargs.copy()
    iface_kwargs.update(connection_auth)
    iface = __salt__['azurearm_network.network_interface_create_or_update'](
        name=name,
        subnet=subnet,
        virtual_network=virtual_network,
        resource_group=resource_group,
        ip_configurations=ip_configurations,
        dns_settings=dns_settings,
        enable_accelerated_networking=enable_accelerated_networking,
        enable_ip_forwarding=enable_ip_forwarding,
        mac_address=mac_address,
        primary=primary,
        network_security_group=network_security_group,
        virtual_machine=virtual_machine,
        tags=tags,
        **iface_kwargs
    )
    if 'error' not in iface:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} has been created.'.format(name)
        return ret
    ret['comment'] = 'Failed to create network interface {0}! ({1})'.format(name, iface.get('error'))
    return ret
def network_interface_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network interface does not exist in the resource group.

    :param name:
        Name of the network interface.

    :param resource_group:
        The resource group assigned to the network interface.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }
    # connection_auth must be supplied (normally from pillar) as a dict.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    iface = __salt__['azurearm_network.network_interface_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )
    # Already absent: success with no changes.
    if 'error' in iface:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} was not found.'.format(name)
        return ret
    # Test mode: report the pending deletion without performing it.
    elif __opts__['test']:
        ret['comment'] = 'Network interface {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {
            'old': iface,
            'new': {},
        }
        return ret
    deleted = __salt__['azurearm_network.network_interface_delete'](name, resource_group, **connection_auth)
    if deleted:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} has been deleted.'.format(name)
        ret['changes'] = {
            'old': iface,
            'new': {}
        }
        return ret
    # Fixed: failure message previously contained a stray ')' character.
    ret['comment'] = 'Failed to delete network interface {0}!'.format(name)
    return ret
def route_table_present(name, resource_group, tags=None, routes=None, disable_bgp_route_propagation=None,
                        connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route table exists.

    :param name:
        Name of the route table.

    :param resource_group:
        The resource group assigned to the route table.

    :param routes:
        An optional list of dictionaries representing valid Route objects contained within a route table. See the
        documentation for the route_present state or route_create_or_update execution module for more information on
        required and optional parameters for routes. The routes are only managed if this parameter is present. When this
        parameter is absent, implemented routes will not be removed, and will merely become unmanaged.

    :param disable_bgp_route_propagation:
        An optional boolean parameter setting whether to disable the routes learned by BGP on the route table.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the route table object.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure route table exists:
            azurearm_network.route_table_present:
                - name: rt1
                - resource_group: group1
                - routes:
                  - name: rt1_route1
                    address_prefix: '0.0.0.0/0'
                    next_hop_type: internet
                  - name: rt1_route2
                    address_prefix: '192.168.0.0/16'
                    next_hop_type: vnetlocal
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }
    # connection_auth must be supplied (normally from pillar) as a dict.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    rt_tbl = __salt__['azurearm_network.route_table_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )
    # Route table already exists: diff each managed property against the
    # desired state, accumulating differences into ret['changes'].
    if 'error' not in rt_tbl:
        # tag changes
        tag_changes = __utils__['dictdiffer.deep_diff'](rt_tbl.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes
        # disable_bgp_route_propagation changes
        # pylint: disable=line-too-long
        if disable_bgp_route_propagation and (disable_bgp_route_propagation != rt_tbl.get('disable_bgp_route_propagation')):
            ret['changes']['disable_bgp_route_propagation'] = {
                'old': rt_tbl.get('disable_bgp_route_propagation'),
                'new': disable_bgp_route_propagation
            }
        # routes changes -- routes are only diffed (and thus managed) when
        # the parameter is supplied; existing routes otherwise stay unmanaged.
        if routes:
            comp_ret = __utils__['azurearm.compare_list_of_dicts'](rt_tbl.get('routes', []), routes)
            if comp_ret.get('comment'):
                ret['comment'] = '"routes" {0}'.format(comp_ret['comment'])
                return ret
            if comp_ret.get('changes'):
                ret['changes']['routes'] = comp_ret['changes']
        # No differences: the state is already satisfied.
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Route table {0} is already present.'.format(name)
            return ret
        # Test mode: report the pending update without applying it.
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Route table {0} would be updated.'.format(name)
            return ret
    else:
        # Route table not found: everything below is a creation.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'tags': tags,
                'routes': routes,
                'disable_bgp_route_propagation': disable_bgp_route_propagation,
            }
        }
    # Test mode for the creation path (the update path returned above).
    if __opts__['test']:
        ret['comment'] = 'Route table {0} would be created.'.format(name)
        ret['result'] = None
        return ret
    # Pass any extra kwargs plus credentials through to the execution module;
    # create_or_update covers both the create and update paths.
    rt_tbl_kwargs = kwargs.copy()
    rt_tbl_kwargs.update(connection_auth)
    rt_tbl = __salt__['azurearm_network.route_table_create_or_update'](
        name=name,
        resource_group=resource_group,
        disable_bgp_route_propagation=disable_bgp_route_propagation,
        routes=routes,
        tags=tags,
        **rt_tbl_kwargs
    )
    if 'error' not in rt_tbl:
        ret['result'] = True
        ret['comment'] = 'Route table {0} has been created.'.format(name)
        return ret
    ret['comment'] = 'Failed to create route table {0}! ({1})'.format(name, rt_tbl.get('error'))
    return ret
def route_table_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route table does not exist in the resource group.

    :param name: Name of the route table.
    :param resource_group: The resource group assigned to the route table.
    :param connection_auth: A dict with subscription and authentication parameters to be used
        in connecting to the Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Credentials are mandatory and must arrive as a dictionary.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.route_table_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # Already absent: success with no changes.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Route table {0} was not found.'.format(name)
        return ret

    # Dry run: report the pending deletion without performing it.
    if __opts__['test']:
        ret['comment'] = 'Route table {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.route_table_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Route table {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete route table {0}!'.format(name)
    return ret
def route_present(name, address_prefix, next_hop_type, route_table, resource_group, next_hop_ip_address=None,
                  connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route exists within a route table.

    :param name:
        Name of the route.

    :param address_prefix:
        The destination CIDR to which the route applies.

    :param next_hop_type:
        The type of Azure hop the packet should be sent to. Possible values are: 'VirtualNetworkGateway', 'VnetLocal',
        'Internet', 'VirtualAppliance', and 'None'.

    :param next_hop_ip_address:
        The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop
        type is 'VirtualAppliance'.

    :param route_table:
        The name of the existing route table which will contain the route.

    :param resource_group:
        The resource group assigned to the route table.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure route exists:
            azurearm_network.route_present:
                - name: rt1_route2
                - route_table: rt1
                - resource_group: group1
                - address_prefix: '192.168.0.0/16'
                - next_hop_type: vnetlocal
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure route table exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }
    # connection_auth must be supplied (normally from pillar) as a dict.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    route = __salt__['azurearm_network.route_get'](
        name,
        route_table,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )
    # Route already exists: diff each managed property against the desired
    # state, accumulating differences into ret['changes'].
    if 'error' not in route:
        if address_prefix != route.get('address_prefix'):
            ret['changes']['address_prefix'] = {
                'old': route.get('address_prefix'),
                'new': address_prefix
            }
        # Case-insensitive compare -- presumably the API returns the hop type
        # with normalized casing; confirm against the execution module.
        if next_hop_type.lower() != route.get('next_hop_type', '').lower():
            ret['changes']['next_hop_type'] = {
                'old': route.get('next_hop_type'),
                'new': next_hop_type
            }
        # next_hop_ip_address is only meaningful for VirtualAppliance hops.
        if next_hop_type.lower() == 'virtualappliance' and next_hop_ip_address != route.get('next_hop_ip_address'):
            ret['changes']['next_hop_ip_address'] = {
                'old': route.get('next_hop_ip_address'),
                'new': next_hop_ip_address
            }
        # No differences: the state is already satisfied.
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Route {0} is already present.'.format(name)
            return ret
        # Test mode: report the pending update without applying it.
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Route {0} would be updated.'.format(name)
            return ret
    else:
        # Route not found: everything below is a creation.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'address_prefix': address_prefix,
                'next_hop_type': next_hop_type,
                'next_hop_ip_address': next_hop_ip_address
            }
        }
    # Test mode for the creation path (the update path returned above).
    if __opts__['test']:
        ret['comment'] = 'Route {0} would be created.'.format(name)
        ret['result'] = None
        return ret
    # Pass any extra kwargs plus credentials through to the execution module;
    # create_or_update covers both the create and update paths.
    route_kwargs = kwargs.copy()
    route_kwargs.update(connection_auth)
    route = __salt__['azurearm_network.route_create_or_update'](
        name=name,
        route_table=route_table,
        resource_group=resource_group,
        address_prefix=address_prefix,
        next_hop_type=next_hop_type,
        next_hop_ip_address=next_hop_ip_address,
        **route_kwargs
    )
    if 'error' not in route:
        ret['result'] = True
        ret['comment'] = 'Route {0} has been created.'.format(name)
        return ret
    ret['comment'] = 'Failed to create route {0}! ({1})'.format(name, route.get('error'))
    return ret
def route_absent(name, route_table, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a route does not exist in the route table.

    :param name:
        Name of the route.

    :param route_table:
        The name of the existing route table containing the route.

    :param resource_group:
        The resource group assigned to the route table.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }
    # connection_auth must be supplied (normally from pillar) as a dict.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    route = __salt__['azurearm_network.route_get'](
        name,
        route_table,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )
    # Already absent: success with no changes.
    if 'error' in route:
        ret['result'] = True
        ret['comment'] = 'Route {0} was not found.'.format(name)
        return ret
    # Test mode: report the pending deletion without performing it.
    elif __opts__['test']:
        ret['comment'] = 'Route {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {
            'old': route,
            'new': {},
        }
        return ret
    deleted = __salt__['azurearm_network.route_delete'](name, route_table, resource_group, **connection_auth)
    if deleted:
        ret['result'] = True
        ret['comment'] = 'Route {0} has been deleted.'.format(name)
        ret['changes'] = {
            'old': route,
            'new': {}
        }
        return ret
    ret['comment'] = 'Failed to delete route {0}!'.format(name)
    return ret
|
saltstack/salt
|
salt/states/azurearm_network.py
|
security_rule_present
|
python
|
def security_rule_present(name, access, direction, priority, protocol, security_group, resource_group,
                          destination_address_prefix=None, destination_port_range=None, source_address_prefix=None,
                          source_port_range=None, description=None, destination_address_prefixes=None,
                          destination_port_ranges=None, source_address_prefixes=None, source_port_ranges=None,
                          connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a security rule exists.

    :param name:
        Name of the security rule.

    :param access:
        'allow' or 'deny'

    :param direction:
        'inbound' or 'outbound'

    :param priority:
        Integer between 100 and 4096 used for ordering rule application.

    :param protocol:
        'tcp', 'udp', or '*'

    :param security_group:
        The name of the existing network security group to contain the security rule.

    :param resource_group:
        The resource group assigned to the network security group.

    :param description:
        Optional description of the security rule.

    :param destination_address_prefix:
        The CIDR or destination IP range. Asterisk '*' can also be used to match all destination IPs.
        Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
        If this is an ingress rule, specifies where network traffic originates from.

    :param destination_port_range:
        The destination port or range. Integer or range between 0 and 65535. Asterisk '*'
        can also be used to match all ports.

    :param source_address_prefix:
        The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs.
        Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
        If this is an ingress rule, specifies where network traffic originates from.

    :param source_port_range:
        The source port or range. Integer or range between 0 and 65535. Asterisk '*'
        can also be used to match all ports.

    :param destination_address_prefixes:
        A list of destination_address_prefix values. This parameter overrides destination_address_prefix
        and will cause any value entered there to be ignored.

    :param destination_port_ranges:
        A list of destination_port_range values. This parameter overrides destination_port_range
        and will cause any value entered there to be ignored.

    :param source_address_prefixes:
        A list of source_address_prefix values. This parameter overrides source_address_prefix
        and will cause any value entered there to be ignored.

    :param source_port_ranges:
        A list of source_port_range values. This parameter overrides source_port_range
        and will cause any value entered there to be ignored.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure security rule exists:
            azurearm_network.security_rule_present:
                - name: nsg1_rule2
                - security_group: nsg1
                - resource_group: group1
                - priority: 101
                - protocol: tcp
                - access: allow
                - direction: inbound
                - source_address_prefix: internet
                - destination_address_prefix: virtualnetwork
                - source_port_range: '*'
                - destination_port_ranges:
                  - '80'
                  - '443'
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure network security group exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }
    # connection_auth must be supplied (normally from pillar) as a dict.
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    # Validate the mutually-exclusive plural/singular parameter pairs.
    # (Rewritten from the previous eval()/exec() implementation to use
    # explicit, statically-analyzable logic with identical behavior.)
    exclusive_params = [
        ('source_port_ranges', source_port_ranges, 'source_port_range', source_port_range),
        ('source_address_prefixes', source_address_prefixes, 'source_address_prefix', source_address_prefix),
        ('destination_port_ranges', destination_port_ranges, 'destination_port_range', destination_port_range),
        ('destination_address_prefixes', destination_address_prefixes, 'destination_address_prefix', destination_address_prefix),
    ]
    for plural_name, plural_value, singular_name, singular_value in exclusive_params:
        # At least one of each pair is required.
        if not plural_value and not singular_value:
            ret['comment'] = 'Either the {0} or {1} parameter must be provided!'.format(plural_name, singular_name)
            return ret
        # The plural form, when given, must be a list.
        if plural_value and not isinstance(plural_value, list):
            ret['comment'] = 'The {0} parameter must be a list!'.format(plural_name)
            return ret
    # A provided plural parameter overrides (and nulls out) its singular
    # counterpart for both the diff below and the create/update call.
    if source_port_ranges:
        source_port_range = None
    if source_address_prefixes:
        source_address_prefix = None
    if destination_port_ranges:
        destination_port_range = None
    if destination_address_prefixes:
        destination_address_prefix = None
    rule = __salt__['azurearm_network.security_rule_get'](
        name,
        security_group,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )
    # Rule already exists: diff each managed property against the desired
    # state, accumulating differences into ret['changes'].
    if 'error' not in rule:
        # access changes (API stores capitalized values, e.g. 'Allow')
        if access.capitalize() != rule.get('access'):
            ret['changes']['access'] = {
                'old': rule.get('access'),
                'new': access
            }
        # description changes
        if description != rule.get('description'):
            ret['changes']['description'] = {
                'old': rule.get('description'),
                'new': description
            }
        # direction changes (API stores capitalized values, e.g. 'Inbound')
        if direction.capitalize() != rule.get('direction'):
            ret['changes']['direction'] = {
                'old': rule.get('direction'),
                'new': direction
            }
        # priority changes
        if int(priority) != rule.get('priority'):
            ret['changes']['priority'] = {
                'old': rule.get('priority'),
                'new': priority
            }
        # protocol changes (case-insensitive compare)
        if protocol.lower() != rule.get('protocol', '').lower():
            ret['changes']['protocol'] = {
                'old': rule.get('protocol'),
                'new': protocol
            }
        # destination_port_range changes
        if destination_port_range != rule.get('destination_port_range'):
            ret['changes']['destination_port_range'] = {
                'old': rule.get('destination_port_range'),
                'new': destination_port_range
            }
        # source_port_range changes
        if source_port_range != rule.get('source_port_range'):
            ret['changes']['source_port_range'] = {
                'old': rule.get('source_port_range'),
                'new': source_port_range
            }
        # destination_port_ranges changes (order-insensitive)
        if sorted(destination_port_ranges or []) != sorted(rule.get('destination_port_ranges', [])):
            ret['changes']['destination_port_ranges'] = {
                'old': rule.get('destination_port_ranges'),
                'new': destination_port_ranges
            }
        # source_port_ranges changes (order-insensitive)
        if sorted(source_port_ranges or []) != sorted(rule.get('source_port_ranges', [])):
            ret['changes']['source_port_ranges'] = {
                'old': rule.get('source_port_ranges'),
                'new': source_port_ranges
            }
        # destination_address_prefix changes (case-insensitive)
        if (destination_address_prefix or '').lower() != rule.get('destination_address_prefix', '').lower():
            ret['changes']['destination_address_prefix'] = {
                'old': rule.get('destination_address_prefix'),
                'new': destination_address_prefix
            }
        # source_address_prefix changes (case-insensitive)
        if (source_address_prefix or '').lower() != rule.get('source_address_prefix', '').lower():
            ret['changes']['source_address_prefix'] = {
                'old': rule.get('source_address_prefix'),
                'new': source_address_prefix
            }
        # destination_address_prefixes changes (order- and case-insensitive;
        # the element-wise pass only runs when the sorted lists differ but
        # have equal length, i.e. the difference may be casing alone)
        if sorted(destination_address_prefixes or []) != sorted(rule.get('destination_address_prefixes', [])):
            if len(destination_address_prefixes or []) != len(rule.get('destination_address_prefixes', [])):
                ret['changes']['destination_address_prefixes'] = {
                    'old': rule.get('destination_address_prefixes'),
                    'new': destination_address_prefixes
                }
            else:
                local_dst_addrs, remote_dst_addrs = (sorted(destination_address_prefixes),
                                                     sorted(rule.get('destination_address_prefixes')))
                for idx in six_range(0, len(local_dst_addrs)):
                    if local_dst_addrs[idx].lower() != remote_dst_addrs[idx].lower():
                        ret['changes']['destination_address_prefixes'] = {
                            'old': rule.get('destination_address_prefixes'),
                            'new': destination_address_prefixes
                        }
                        break
        # source_address_prefixes changes (same approach as above)
        if sorted(source_address_prefixes or []) != sorted(rule.get('source_address_prefixes', [])):
            if len(source_address_prefixes or []) != len(rule.get('source_address_prefixes', [])):
                ret['changes']['source_address_prefixes'] = {
                    'old': rule.get('source_address_prefixes'),
                    'new': source_address_prefixes
                }
            else:
                local_src_addrs, remote_src_addrs = (sorted(source_address_prefixes),
                                                     sorted(rule.get('source_address_prefixes')))
                for idx in six_range(0, len(local_src_addrs)):
                    if local_src_addrs[idx].lower() != remote_src_addrs[idx].lower():
                        ret['changes']['source_address_prefixes'] = {
                            'old': rule.get('source_address_prefixes'),
                            'new': source_address_prefixes
                        }
                        break
        # No differences: the state is already satisfied.
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Security rule {0} is already present.'.format(name)
            return ret
        # Test mode: report the pending update without applying it.
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Security rule {0} would be updated.'.format(name)
            return ret
    else:
        # Rule not found: everything below is a creation.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'access': access,
                'description': description,
                'direction': direction,
                'priority': priority,
                'protocol': protocol,
                'destination_address_prefix': destination_address_prefix,
                'destination_address_prefixes': destination_address_prefixes,
                'destination_port_range': destination_port_range,
                'destination_port_ranges': destination_port_ranges,
                'source_address_prefix': source_address_prefix,
                'source_address_prefixes': source_address_prefixes,
                'source_port_range': source_port_range,
                'source_port_ranges': source_port_ranges,
            }
        }
    # Test mode for the creation path (the update path returned above).
    if __opts__['test']:
        ret['comment'] = 'Security rule {0} would be created.'.format(name)
        ret['result'] = None
        return ret
    # Pass any extra kwargs plus credentials through to the execution module;
    # create_or_update covers both the create and update paths.
    rule_kwargs = kwargs.copy()
    rule_kwargs.update(connection_auth)
    rule = __salt__['azurearm_network.security_rule_create_or_update'](
        name=name,
        access=access,
        description=description,
        direction=direction,
        priority=priority,
        protocol=protocol,
        security_group=security_group,
        resource_group=resource_group,
        destination_address_prefix=destination_address_prefix,
        destination_address_prefixes=destination_address_prefixes,
        destination_port_range=destination_port_range,
        destination_port_ranges=destination_port_ranges,
        source_address_prefix=source_address_prefix,
        source_address_prefixes=source_address_prefixes,
        source_port_range=source_port_range,
        source_port_ranges=source_port_ranges,
        **rule_kwargs
    )
    if 'error' not in rule:
        ret['result'] = True
        ret['comment'] = 'Security rule {0} has been created.'.format(name)
        return ret
    ret['comment'] = 'Failed to create security rule {0}! ({1})'.format(name, rule.get('error'))
    return ret
|
.. versionadded:: 2019.2.0
Ensure a security rule exists.
:param name:
Name of the security rule.
:param access:
'allow' or 'deny'
:param direction:
'inbound' or 'outbound'
:param priority:
Integer between 100 and 4096 used for ordering rule application.
:param protocol:
'tcp', 'udp', or '*'
:param security_group:
The name of the existing network security group to contain the security rule.
:param resource_group:
The resource group assigned to the network security group.
:param description:
Optional description of the security rule.
:param destination_address_prefix:
The CIDR or destination IP range. Asterix '*' can also be used to match all destination IPs.
Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
If this is an ingress rule, specifies where network traffic originates from.
:param destination_port_range:
The destination port or range. Integer or range between 0 and 65535. Asterix '*'
can also be used to match all ports.
:param source_address_prefix:
The CIDR or source IP range. Asterix '*' can also be used to match all source IPs.
Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
If this is an ingress rule, specifies where network traffic originates from.
:param source_port_range:
The source port or range. Integer or range between 0 and 65535. Asterix '*'
can also be used to match all ports.
:param destination_address_prefixes:
A list of destination_address_prefix values. This parameter overrides destination_address_prefix
and will cause any value entered there to be ignored.
:param destination_port_ranges:
A list of destination_port_range values. This parameter overrides destination_port_range
and will cause any value entered there to be ignored.
:param source_address_prefixes:
A list of source_address_prefix values. This parameter overrides source_address_prefix
and will cause any value entered there to be ignored.
:param source_port_ranges:
A list of source_port_range values. This parameter overrides source_port_range
and will cause any value entered there to be ignored.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure security rule exists:
azurearm_network.security_rule_present:
- name: nsg1_rule2
- security_group: nsg1
- resource_group: group1
- priority: 101
- protocol: tcp
- access: allow
- direction: inbound
- source_address_prefix: internet
- destination_address_prefix: virtualnetwork
- source_port_range: '*'
- destination_port_ranges:
- '80'
- '443'
- connection_auth: {{ profile }}
- require:
- azurearm_network: Ensure network security group exists
|
train
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/azurearm_network.py#L737-L1052
| null |
# -*- coding: utf-8 -*-
'''
Azure (ARM) Network State Module
.. versionadded:: 2019.2.0
:maintainer: <devops@decisionlab.io>
:maturity: new
:depends:
* `azure <https://pypi.python.org/pypi/azure>`_ >= 2.0.0
* `azure-common <https://pypi.python.org/pypi/azure-common>`_ >= 1.1.8
* `azure-mgmt <https://pypi.python.org/pypi/azure-mgmt>`_ >= 1.0.0
* `azure-mgmt-compute <https://pypi.python.org/pypi/azure-mgmt-compute>`_ >= 1.0.0
* `azure-mgmt-network <https://pypi.python.org/pypi/azure-mgmt-network>`_ >= 1.7.1
* `azure-mgmt-resource <https://pypi.python.org/pypi/azure-mgmt-resource>`_ >= 1.1.0
* `azure-mgmt-storage <https://pypi.python.org/pypi/azure-mgmt-storage>`_ >= 1.0.0
* `azure-mgmt-web <https://pypi.python.org/pypi/azure-mgmt-web>`_ >= 0.32.0
* `azure-storage <https://pypi.python.org/pypi/azure-storage>`_ >= 0.34.3
* `msrestazure <https://pypi.python.org/pypi/msrestazure>`_ >= 0.4.21
:platform: linux
:configuration: This module requires Azure Resource Manager credentials to be passed as a dictionary of
keyword arguments to the ``connection_auth`` parameter in order to work properly. Since the authentication
parameters are sensitive, it's recommended to pass them to the states via pillar.
Required provider parameters:
if using username and password:
* ``subscription_id``
* ``username``
* ``password``
if using a service principal:
* ``subscription_id``
* ``tenant``
* ``client_id``
* ``secret``
Optional provider parameters:
**cloud_environment**: Used to point the cloud driver to different API endpoints, such as Azure GovCloud. Possible values:
* ``AZURE_PUBLIC_CLOUD`` (default)
* ``AZURE_CHINA_CLOUD``
* ``AZURE_US_GOV_CLOUD``
* ``AZURE_GERMAN_CLOUD``
Example Pillar for Azure Resource Manager authentication:
.. code-block:: yaml
azurearm:
user_pass_auth:
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
username: fletch
password: 123pass
mysubscription:
subscription_id: 3287abc8-f98a-c678-3bde-326766fd3617
tenant: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
client_id: ABCDEFAB-1234-ABCD-1234-ABCDEFABCDEF
secret: XXXXXXXXXXXXXXXXXXXXXXXX
cloud_environment: AZURE_PUBLIC_CLOUD
Example states using Azure Resource Manager authentication:
.. code-block:: jinja
{% set profile = salt['pillar.get']('azurearm:mysubscription') %}
Ensure virtual network exists:
azurearm_network.virtual_network_present:
- name: my_vnet
- resource_group: my_rg
- address_prefixes:
- '10.0.0.0/8'
- '192.168.0.0/16'
- dns_servers:
- '8.8.8.8'
- tags:
how_awesome: very
contact_name: Elmer Fudd Gantry
- connection_auth: {{ profile }}
Ensure virtual network is absent:
azurearm_network.virtual_network_absent:
- name: other_vnet
- resource_group: my_rg
- connection_auth: {{ profile }}
'''
# Python libs
from __future__ import absolute_import
import logging
# Salt libs
try:
from salt.ext.six.moves import range as six_range
except ImportError:
six_range = range
__virtualname__ = 'azurearm_network'
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Load this state module only when the corresponding ``azurearm_network``
    execution module has been loaded successfully (detected by the presence
    of one of its functions in ``__salt__``).
    '''
    if 'azurearm_network.check_ip_address_availability' in __salt__:
        return __virtualname__
    return False
def virtual_network_present(name, address_prefixes, resource_group, dns_servers=None,
                            tags=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a virtual network exists.

    :param name:
        Name of the virtual network.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param address_prefixes:
        A list of CIDR blocks which can be used by subnets within the virtual network.

    :param dns_servers:
        A list of DNS server addresses.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the virtual network object.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    :return:
        A standard Salt state return dictionary with ``name``, ``result``,
        ``comment`` and ``changes`` keys.

    Example usage:

    .. code-block:: yaml

        Ensure virtual network exists:
            azurearm_network.virtual_network_present:
                - name: vnet1
                - resource_group: group1
                - address_prefixes:
                    - '10.0.0.0/8'
                    - '192.168.0.0/16'
                - dns_servers:
                    - '8.8.8.8'
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    # Standard state return structure; 'result' stays False until the state
    # either finds nothing to do or the API call succeeds.
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }
    # Credentials must be supplied as a dict (normally from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    # Look up the existing virtual network; the execution module returns a
    # dict containing an 'error' key when the object cannot be retrieved.
    vnet = __salt__['azurearm_network.virtual_network_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )
    if 'error' not in vnet:
        # The vnet already exists -- accumulate the delta between the desired
        # parameters and the current object into ret['changes'].
        tag_changes = __utils__['dictdiffer.deep_diff'](vnet.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes
        # DNS servers are compared as sets, so ordering differences alone do
        # not register as a change.
        dns_changes = set(dns_servers or []).symmetric_difference(
            set(vnet.get('dhcp_options', {}).get('dns_servers', [])))
        if dns_changes:
            ret['changes']['dns_servers'] = {
                'old': vnet.get('dhcp_options', {}).get('dns_servers', []),
                'new': dns_servers,
            }
        # Address prefixes are likewise compared order-insensitively.
        addr_changes = set(address_prefixes or []).symmetric_difference(
            set(vnet.get('address_space', {}).get('address_prefixes', [])))
        if addr_changes:
            ret['changes']['address_space'] = {
                'address_prefixes': {
                    'old': vnet.get('address_space', {}).get('address_prefixes', []),
                    'new': address_prefixes,
                }
            }
        # Optional protection flags may arrive via kwargs; absent values are
        # treated as False for comparison purposes.
        if kwargs.get('enable_ddos_protection', False) != vnet.get('enable_ddos_protection'):
            ret['changes']['enable_ddos_protection'] = {
                'old': vnet.get('enable_ddos_protection'),
                'new': kwargs.get('enable_ddos_protection')
            }
        if kwargs.get('enable_vm_protection', False) != vnet.get('enable_vm_protection'):
            ret['changes']['enable_vm_protection'] = {
                'old': vnet.get('enable_vm_protection'),
                'new': kwargs.get('enable_vm_protection')
            }
        # No delta -> nothing to do; report success without calling the API.
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Virtual network {0} is already present.'.format(name)
            return ret
        # test=True: report the pending update without applying it.
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Virtual network {0} would be updated.'.format(name)
            return ret
    else:
        # The vnet does not exist yet -- the entire desired object is "new".
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'resource_group': resource_group,
                'address_space': {'address_prefixes': address_prefixes},
                'dhcp_options': {'dns_servers': dns_servers},
                'enable_ddos_protection': kwargs.get('enable_ddos_protection', False),
                'enable_vm_protection': kwargs.get('enable_vm_protection', False),
                'tags': tags,
            }
        }
        if __opts__['test']:
            ret['comment'] = 'Virtual network {0} would be created.'.format(name)
            ret['result'] = None
            return ret
    # Merge credentials into the pass-through kwargs for the API call; this
    # same call performs both creation and update.
    vnet_kwargs = kwargs.copy()
    vnet_kwargs.update(connection_auth)
    vnet = __salt__['azurearm_network.virtual_network_create_or_update'](
        name=name,
        resource_group=resource_group,
        address_prefixes=address_prefixes,
        dns_servers=dns_servers,
        tags=tags,
        **vnet_kwargs
    )
    if 'error' not in vnet:
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} has been created.'.format(name)
        return ret
    ret['comment'] = 'Failed to create virtual network {0}! ({1})'.format(name, vnet.get('error'))
    return ret
def virtual_network_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a virtual network does not exist in the resource group.

    :param name:
        Name of the virtual network.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Credentials must be supplied as a dictionary (normally from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.virtual_network_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the virtual network is already gone.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} was not found.'.format(name)
        return ret

    # test=True: report the pending deletion without performing it.
    if __opts__['test']:
        ret['comment'] = 'Virtual network {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.virtual_network_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Virtual network {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete virtual network {0}!'.format(name)
    return ret
def subnet_present(name, address_prefix, virtual_network, resource_group,
                   security_group=None, route_table=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a subnet exists.

    :param name:
        Name of the subnet.

    :param address_prefix:
        A CIDR block used by the subnet within the virtual network.

    :param virtual_network:
        Name of the existing virtual network to contain the subnet.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param security_group:
        The name of the existing network security group to assign to the subnet.

    :param route_table:
        The name of the existing route table to assign to the subnet.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    :return:
        A standard Salt state return dictionary with ``name``, ``result``,
        ``comment`` and ``changes`` keys.

    Example usage:

    .. code-block:: yaml

        Ensure subnet exists:
            azurearm_network.subnet_present:
                - name: vnet1_sn1
                - virtual_network: vnet1
                - resource_group: group1
                - address_prefix: '192.168.1.0/24'
                - security_group: nsg1
                - route_table: rt1
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure virtual network exists
                  - azurearm_network: Ensure network security group exists
                  - azurearm_network: Ensure route table exists
    '''
    # Standard state return structure; 'result' stays False until the state
    # either finds nothing to do or the API call succeeds.
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }
    # Credentials must be supplied as a dict (normally from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    # Look up the existing subnet; the execution module returns a dict
    # containing an 'error' key when the object cannot be retrieved.
    snet = __salt__['azurearm_network.subnet_get'](
        name,
        virtual_network,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )
    if 'error' not in snet:
        # Subnet exists -- accumulate the delta into ret['changes'].
        if address_prefix != snet.get('address_prefix'):
            ret['changes']['address_prefix'] = {
                'old': snet.get('address_prefix'),
                'new': address_prefix
            }
        # The API stores full resource IDs for associated objects; extract the
        # trailing segment so they can be compared to the names given here.
        nsg_name = None
        if snet.get('network_security_group'):
            nsg_name = snet['network_security_group']['id'].split('/')[-1]
        # Only flag a change when a security group is explicitly requested;
        # passing None leaves any existing association unmanaged.
        if security_group and (security_group != nsg_name):
            ret['changes']['network_security_group'] = {
                'old': nsg_name,
                'new': security_group
            }
        rttbl_name = None
        if snet.get('route_table'):
            rttbl_name = snet['route_table']['id'].split('/')[-1]
        # Same opt-in semantics for the route table association.
        if route_table and (route_table != rttbl_name):
            ret['changes']['route_table'] = {
                'old': rttbl_name,
                'new': route_table
            }
        # No delta -> nothing to do; report success without calling the API.
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Subnet {0} is already present.'.format(name)
            return ret
        # test=True: report the pending update without applying it.
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Subnet {0} would be updated.'.format(name)
            return ret
    else:
        # The subnet does not exist yet -- the entire desired object is "new".
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'address_prefix': address_prefix,
                'network_security_group': security_group,
                'route_table': route_table
            }
        }
        if __opts__['test']:
            ret['comment'] = 'Subnet {0} would be created.'.format(name)
            ret['result'] = None
            return ret
    # Merge credentials into the pass-through kwargs; this same call performs
    # both creation and update.
    snet_kwargs = kwargs.copy()
    snet_kwargs.update(connection_auth)
    snet = __salt__['azurearm_network.subnet_create_or_update'](
        name=name,
        virtual_network=virtual_network,
        resource_group=resource_group,
        address_prefix=address_prefix,
        network_security_group=security_group,
        route_table=route_table,
        **snet_kwargs
    )
    if 'error' not in snet:
        ret['result'] = True
        ret['comment'] = 'Subnet {0} has been created.'.format(name)
        return ret
    ret['comment'] = 'Failed to create subnet {0}! ({1})'.format(name, snet.get('error'))
    return ret
def subnet_absent(name, virtual_network, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a virtual network does not exist in the virtual network.

    :param name:
        Name of the subnet.

    :param virtual_network:
        Name of the existing virtual network containing the subnet.

    :param resource_group:
        The resource group assigned to the virtual network.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Credentials must be supplied as a dictionary (normally from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.subnet_get'](
        name,
        virtual_network,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the subnet is already gone.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Subnet {0} was not found.'.format(name)
        return ret

    # test=True: report the pending deletion without performing it.
    if __opts__['test']:
        ret['comment'] = 'Subnet {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.subnet_delete'](name, virtual_network, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Subnet {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete subnet {0}!'.format(name)
    return ret
def network_security_group_present(name, resource_group, tags=None, security_rules=None, connection_auth=None,
                                   **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network security group exists.

    :param name:
        Name of the network security group.

    :param resource_group:
        The resource group assigned to the network security group.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the network security group object.

    :param security_rules: An optional list of dictionaries representing valid SecurityRule objects. See the
        documentation for the security_rule_present state or security_rule_create_or_update execution module
        for more information on required and optional parameters for security rules. The rules are only
        managed if this parameter is present. When this parameter is absent, implemented rules will not be removed,
        and will merely become unmanaged.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    :return:
        A standard Salt state return dictionary with ``name``, ``result``,
        ``comment`` and ``changes`` keys.

    Example usage:

    .. code-block:: yaml

        Ensure network security group exists:
            azurearm_network.network_security_group_present:
                - name: nsg1
                - resource_group: group1
                - security_rules:
                  - name: nsg1_rule1
                    priority: 100
                    protocol: tcp
                    access: allow
                    direction: outbound
                    source_address_prefix: virtualnetwork
                    destination_address_prefix: internet
                    source_port_range: '*'
                    destination_port_range: '*'
                  - name: nsg1_rule2
                    priority: 101
                    protocol: tcp
                    access: allow
                    direction: inbound
                    source_address_prefix: internet
                    destination_address_prefix: virtualnetwork
                    source_port_range: '*'
                    destination_port_ranges:
                      - '80'
                      - '443'
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    # Standard state return structure; 'result' stays False until the state
    # either finds nothing to do or the API call succeeds.
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }
    # Credentials must be supplied as a dict (normally from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret
    # Look up the existing NSG; the execution module returns a dict containing
    # an 'error' key when the object cannot be retrieved.
    nsg = __salt__['azurearm_network.network_security_group_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )
    if 'error' not in nsg:
        # NSG exists -- accumulate the delta into ret['changes'].
        tag_changes = __utils__['dictdiffer.deep_diff'](nsg.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes
        # Security rules are only diffed when explicitly supplied; otherwise
        # any rules already configured remain unmanaged by this state.
        if security_rules:
            comp_ret = __utils__['azurearm.compare_list_of_dicts'](nsg.get('security_rules', []), security_rules)
            # A 'comment' from the comparison helper signals invalid input;
            # bail out with result=False so the user can correct the SLS.
            if comp_ret.get('comment'):
                ret['comment'] = '"security_rules" {0}'.format(comp_ret['comment'])
                return ret
            if comp_ret.get('changes'):
                ret['changes']['security_rules'] = comp_ret['changes']
        # No delta -> nothing to do; report success without calling the API.
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Network security group {0} is already present.'.format(name)
            return ret
        # test=True: report the pending update without applying it.
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Network security group {0} would be updated.'.format(name)
            return ret
    else:
        # The NSG does not exist yet -- the entire desired object is "new".
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'resource_group': resource_group,
                'tags': tags,
                'security_rules': security_rules,
            }
        }
        if __opts__['test']:
            ret['comment'] = 'Network security group {0} would be created.'.format(name)
            ret['result'] = None
            return ret
    # Merge credentials into the pass-through kwargs; this same call performs
    # both creation and update.
    nsg_kwargs = kwargs.copy()
    nsg_kwargs.update(connection_auth)
    nsg = __salt__['azurearm_network.network_security_group_create_or_update'](
        name=name,
        resource_group=resource_group,
        tags=tags,
        security_rules=security_rules,
        **nsg_kwargs
    )
    if 'error' not in nsg:
        ret['result'] = True
        ret['comment'] = 'Network security group {0} has been created.'.format(name)
        return ret
    ret['comment'] = 'Failed to create network security group {0}! ({1})'.format(name, nsg.get('error'))
    return ret
def network_security_group_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a network security group does not exist in the resource group.

    :param name:
        Name of the network security group.

    :param resource_group:
        The resource group assigned to the network security group.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Credentials must be supplied as a dictionary (normally from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.network_security_group_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the security group is already gone.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Network security group {0} was not found.'.format(name)
        return ret

    # test=True: report the pending deletion without performing it.
    if __opts__['test']:
        ret['comment'] = 'Network security group {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.network_security_group_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Network security group {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete network security group {0}!'.format(name)
    return ret
def security_rule_absent(name, security_group, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a security rule does not exist in the network security group.

    :param name:
        Name of the security rule.

    :param security_group:
        The network security group containing the security rule.

    :param resource_group:
        The resource group assigned to the network security group.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Credentials must be supplied as a dictionary (normally from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.security_rule_get'](
        name,
        security_group,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the rule is already gone.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Security rule {0} was not found.'.format(name)
        return ret

    # test=True: report the pending deletion without performing it.
    if __opts__['test']:
        ret['comment'] = 'Security rule {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.security_rule_delete'](name, security_group, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Security rule {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete security rule {0}!'.format(name)
    return ret
def load_balancer_present(name, resource_group, sku=None, frontend_ip_configurations=None, backend_address_pools=None,
                          load_balancing_rules=None, probes=None, inbound_nat_rules=None, inbound_nat_pools=None,
                          outbound_nat_rules=None, tags=None, connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure a load balancer exists.

    :param name:
        Name of the load balancer.

    :param resource_group:
        The resource group assigned to the load balancer.

    :param sku:
        The load balancer SKU, which can be 'Basic' or 'Standard'.

    :param tags:
        A dictionary of strings can be passed as tag metadata to the load balancer object.

    :param frontend_ip_configurations:
        An optional list of dictionaries representing valid FrontendIPConfiguration objects. A frontend IP
        configuration can be either private (using ``private_ip_address`` and ``subnet`` parameters) or public
        (using a ``public_ip_address`` reference). Each entry requires a unique ``name``.

    :param backend_address_pools:
        An optional list of dictionaries representing valid BackendAddressPool objects. Only the ``name``
        parameter is writable; all other parameters are read-only references from other objects.

    :param probes:
        An optional list of dictionaries representing valid Probe objects (``name``, ``protocol``, ``port``,
        ``interval_in_seconds``, ``number_of_probes``, ``request_path``).

    :param load_balancing_rules:
        An optional list of dictionaries representing valid LoadBalancingRule objects, each referencing a
        frontend IP configuration, backend address pool, and probe by name.

    :param inbound_nat_rules:
        An optional list of dictionaries representing valid InboundNatRule objects. Mutually exclusive with
        ``inbound_nat_pools``.

    :param inbound_nat_pools:
        An optional list of dictionaries representing valid InboundNatPool objects. Mutually exclusive with
        ``inbound_nat_rules``.

    :param outbound_nat_rules:
        An optional list of dictionaries representing valid OutboundNatRule objects.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    See the documentation for the ``azurearm_network.load_balancer_create_or_update`` execution module for
    the full set of valid keys accepted by each of the object dictionaries above.

    Example usage:

    .. code-block:: yaml

        Ensure load balancer exists:
            azurearm_network.load_balancer_present:
                - name: lb1
                - resource_group: group1
                - location: eastus
                - frontend_ip_configurations:
                  - name: lb1_feip1
                    public_ip_address: pub_ip1
                - backend_address_pools:
                  - name: lb1_bepool1
                - probes:
                  - name: lb1_webprobe1
                    protocol: tcp
                    port: 80
                    interval_in_seconds: 5
                    number_of_probes: 2
                - load_balancing_rules:
                  - name: lb1_webprobe1
                    protocol: tcp
                    frontend_port: 80
                    backend_port: 80
                    idle_timeout_in_minutes: 4
                    frontend_ip_configuration: lb1_feip1
                    backend_address_pool: lb1_bepool1
                    probe: lb1_webprobe1
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
                  - azurearm_network: Ensure public IP exists
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Credentials must be supplied as a dictionary (normally from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # The REST API expects the SKU as an object with a capitalized name.
    if sku:
        sku = {'name': sku.capitalize()}

    load_bal = __salt__['azurearm_network.load_balancer_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in load_bal:
        # The load balancer exists -- build the delta between the desired
        # parameters and the current object.
        tag_diff = __utils__['dictdiffer.deep_diff'](load_bal.get('tags', {}), tags or {})
        if tag_diff:
            ret['changes']['tags'] = tag_diff

        if sku:
            sku_diff = __utils__['dictdiffer.deep_diff'](load_bal.get('sku', {}), sku)
            if sku_diff:
                ret['changes']['sku'] = sku_diff

        # Every list-valued parameter is diffed the same way. The third element
        # of each tuple names keys holding references to other objects, which
        # the comparison helper treats specially; None means no such keys.
        list_params = (
            ('frontend_ip_configurations', frontend_ip_configurations, ['public_ip_address', 'subnet']),
            ('backend_address_pools', backend_address_pools, None),
            ('probes', probes, None),
            ('load_balancing_rules', load_balancing_rules,
             ['frontend_ip_configuration', 'backend_address_pool', 'probe']),
            ('inbound_nat_rules', inbound_nat_rules, ['frontend_ip_configuration']),
            ('inbound_nat_pools', inbound_nat_pools, ['frontend_ip_configuration']),
            ('outbound_nat_rules', outbound_nat_rules, ['frontend_ip_configuration']),
        )

        for param_name, param_value, ref_keys in list_params:
            if not param_value:
                continue
            if ref_keys is not None:
                comp_ret = __utils__['azurearm.compare_list_of_dicts'](
                    load_bal.get(param_name, []),
                    param_value,
                    ref_keys
                )
            else:
                comp_ret = __utils__['azurearm.compare_list_of_dicts'](
                    load_bal.get(param_name, []),
                    param_value
                )
            # A 'comment' from the comparison helper signals invalid input;
            # bail out so the user can correct the SLS.
            if comp_ret.get('comment'):
                ret['comment'] = '"{0}" {1}'.format(param_name, comp_ret['comment'])
                return ret
            if comp_ret.get('changes'):
                ret['changes'][param_name] = comp_ret['changes']

        # No delta -> nothing to do; report success without calling the API.
        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Load balancer {0} is already present.'.format(name)
            return ret

        # test=True: report the pending update without applying it.
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Load balancer {0} would be updated.'.format(name)
            return ret

    else:
        # The load balancer does not exist yet -- the desired object is "new".
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'sku': sku,
                'tags': tags,
                'frontend_ip_configurations': frontend_ip_configurations,
                'backend_address_pools': backend_address_pools,
                'load_balancing_rules': load_balancing_rules,
                'probes': probes,
                'inbound_nat_rules': inbound_nat_rules,
                'inbound_nat_pools': inbound_nat_pools,
                'outbound_nat_rules': outbound_nat_rules,
            }
        }

        if __opts__['test']:
            ret['comment'] = 'Load balancer {0} would be created.'.format(name)
            ret['result'] = None
            return ret

    # Merge credentials into the pass-through kwargs; this same call performs
    # both creation and update.
    lb_kwargs = kwargs.copy()
    lb_kwargs.update(connection_auth)

    load_bal = __salt__['azurearm_network.load_balancer_create_or_update'](
        name=name,
        resource_group=resource_group,
        sku=sku,
        tags=tags,
        frontend_ip_configurations=frontend_ip_configurations,
        backend_address_pools=backend_address_pools,
        load_balancing_rules=load_balancing_rules,
        probes=probes,
        inbound_nat_rules=inbound_nat_rules,
        inbound_nat_pools=inbound_nat_pools,
        outbound_nat_rules=outbound_nat_rules,
        **lb_kwargs
    )

    if 'error' not in load_bal:
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create load balancer {0}! ({1})'.format(name, load_bal.get('error'))
    return ret
def load_balancer_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0

    Ensure a load balancer does not exist in the resource group.

    :param name:
        Name of the load balancer.

    :param resource_group:
        The resource group assigned to the load balancer.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Credentials must be supplied as a dictionary (normally from pillar).
    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    existing = __salt__['azurearm_network.load_balancer_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the load balancer is already gone.
    if 'error' in existing:
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} was not found.'.format(name)
        return ret

    # test=True: report the pending deletion without performing it.
    if __opts__['test']:
        ret['comment'] = 'Load balancer {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    if __salt__['azurearm_network.load_balancer_delete'](name, resource_group, **connection_auth):
        ret['result'] = True
        ret['comment'] = 'Load balancer {0} has been deleted.'.format(name)
        ret['changes'] = {'old': existing, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete load balancer {0}!'.format(name)
    return ret
def public_ip_address_present(name, resource_group, tags=None, sku=None, public_ip_allocation_method=None,
                              public_ip_address_version=None, dns_settings=None, idle_timeout_in_minutes=None,
                              connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0
    Ensure a public IP address exists.
    :param name:
        Name of the public IP address.
    :param resource_group:
        The resource group assigned to the public IP address.
    :param dns_settings:
        An optional dictionary representing a valid PublicIPAddressDnsSettings object. Parameters include
        'domain_name_label' and 'reverse_fqdn', which accept strings. The 'domain_name_label' parameter is concatenated
        with the regionalized DNS zone make up the fully qualified domain name associated with the public IP address.
        If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS
        system. The 'reverse_fqdn' parameter is a user-visible, fully qualified domain name that resolves to this public
        IP address. If the reverse FQDN is specified, then a PTR DNS record is created pointing from the IP address in
        the in-addr.arpa domain to the reverse FQDN.
    :param sku:
        The public IP address SKU, which can be 'Basic' or 'Standard'.
    :param public_ip_allocation_method:
        The public IP allocation method. Possible values are: 'Static' and 'Dynamic'.
    :param public_ip_address_version:
        The public IP address version. Possible values are: 'IPv4' and 'IPv6'.
    :param idle_timeout_in_minutes:
        An integer representing the idle timeout of the public IP address.
    :param tags:
        A dictionary of strings can be passed as tag metadata to the public IP address object.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    Example usage:
    .. code-block:: yaml
        Ensure public IP exists:
            azurearm_network.public_ip_address_present:
                - name: pub_ip1
                - resource_group: group1
                - dns_settings:
                    domain_name_label: decisionlab-ext-test-label
                - sku: basic
                - public_ip_allocation_method: static
                - public_ip_address_version: ipv4
                - idle_timeout_in_minutes: 4
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # The API models the SKU as an object; wrap the plain string
    # (e.g. 'basic' -> {'name': 'Basic'}) before comparing or submitting.
    if sku:
        sku = {'name': sku.capitalize()}

    pub_ip = __salt__['azurearm_network.public_ip_address_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # Existing address found: build a per-field diff into ret['changes'].
    if 'error' not in pub_ip:
        # tag changes
        tag_changes = __utils__['dictdiffer.deep_diff'](pub_ip.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        # dns_settings changes
        if dns_settings:
            if not isinstance(dns_settings, dict):
                ret['comment'] = 'DNS settings must be provided as a dictionary!'
                return ret

            # The first differing key marks the whole dns_settings block as changed.
            for key in dns_settings:
                if dns_settings[key] != pub_ip.get('dns_settings', {}).get(key):
                    ret['changes']['dns_settings'] = {
                        'old': pub_ip.get('dns_settings'),
                        'new': dns_settings
                    }
                    break

        # sku changes
        if sku:
            sku_changes = __utils__['dictdiffer.deep_diff'](pub_ip.get('sku', {}), sku)
            if sku_changes:
                ret['changes']['sku'] = sku_changes

        # public_ip_allocation_method changes
        if public_ip_allocation_method:
            # capitalize() normalizes user input to the API's 'Static'/'Dynamic' form.
            if public_ip_allocation_method.capitalize() != pub_ip.get('public_ip_allocation_method'):
                ret['changes']['public_ip_allocation_method'] = {
                    'old': pub_ip.get('public_ip_allocation_method'),
                    'new': public_ip_allocation_method
                }

        # public_ip_address_version changes
        if public_ip_address_version:
            # Case-insensitive compare: the API returns 'IPv4'/'IPv6' while users may pass lowercase.
            if public_ip_address_version.lower() != pub_ip.get('public_ip_address_version', '').lower():
                ret['changes']['public_ip_address_version'] = {
                    'old': pub_ip.get('public_ip_address_version'),
                    'new': public_ip_address_version
                }

        # idle_timeout_in_minutes changes
        # Cast to int so a YAML string value compares correctly against the API's integer.
        if idle_timeout_in_minutes and (int(idle_timeout_in_minutes) != pub_ip.get('idle_timeout_in_minutes')):
            ret['changes']['idle_timeout_in_minutes'] = {
                'old': pub_ip.get('idle_timeout_in_minutes'),
                'new': idle_timeout_in_minutes
            }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Public IP address {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Public IP address {0} would be updated.'.format(name)
            return ret

    else:
        # No existing address: everything below is a new-resource diff.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'tags': tags,
                'dns_settings': dns_settings,
                'sku': sku,
                'public_ip_allocation_method': public_ip_allocation_method,
                'public_ip_address_version': public_ip_address_version,
                'idle_timeout_in_minutes': idle_timeout_in_minutes,
            }
        }

        if __opts__['test']:
            ret['comment'] = 'Public IP address {0} would be created.'.format(name)
            ret['result'] = None
            return ret

    pub_ip_kwargs = kwargs.copy()
    pub_ip_kwargs.update(connection_auth)

    pub_ip = __salt__['azurearm_network.public_ip_address_create_or_update'](
        name=name,
        resource_group=resource_group,
        sku=sku,
        tags=tags,
        dns_settings=dns_settings,
        public_ip_allocation_method=public_ip_allocation_method,
        public_ip_address_version=public_ip_address_version,
        idle_timeout_in_minutes=idle_timeout_in_minutes,
        **pub_ip_kwargs
    )

    if 'error' not in pub_ip:
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create public IP address {0}! ({1})'.format(name, pub_ip.get('error'))
    return ret
def public_ip_address_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0
    Ensure a public IP address does not exist in the resource group.
    :param name:
        Name of the public IP address.
    :param resource_group:
        The resource group assigned to the public IP address.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    pub_ip = __salt__['azurearm_network.public_ip_address_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the address is already absent.
    if 'error' in pub_ip:
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Public IP address {0} would be deleted.'.format(name)
        ret['changes'] = {'old': pub_ip, 'new': {}}
        return ret

    deleted = __salt__['azurearm_network.public_ip_address_delete'](name, resource_group, **connection_auth)

    if deleted:
        ret['result'] = True
        ret['comment'] = 'Public IP address {0} has been deleted.'.format(name)
        ret['changes'] = {'old': pub_ip, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete public IP address {0}!'.format(name)
    return ret
def network_interface_present(name, ip_configurations, subnet, virtual_network, resource_group, tags=None,
                              virtual_machine=None, network_security_group=None, dns_settings=None, mac_address=None,
                              primary=None, enable_accelerated_networking=None, enable_ip_forwarding=None,
                              connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0
    Ensure a network interface exists.
    :param name:
        Name of the network interface.
    :param ip_configurations:
        A list of dictionaries representing valid NetworkInterfaceIPConfiguration objects. The 'name' key is required at
        minimum. At least one IP Configuration must be present.
    :param subnet:
        Name of the existing subnet assigned to the network interface.
    :param virtual_network:
        Name of the existing virtual network containing the subnet.
    :param resource_group:
        The resource group assigned to the virtual network.
    :param tags:
        A dictionary of strings can be passed as tag metadata to the network interface object.
    :param network_security_group:
        The name of the existing network security group to assign to the network interface.
    :param virtual_machine:
        The name of the existing virtual machine to assign to the network interface.
    :param dns_settings:
        An optional dictionary representing a valid NetworkInterfaceDnsSettings object. Valid parameters are:
        - ``dns_servers``: List of DNS server IP addresses. Use 'AzureProvidedDNS' to switch to Azure provided DNS
          resolution. 'AzureProvidedDNS' value cannot be combined with other IPs, it must be the only value in
          dns_servers collection.
        - ``internal_dns_name_label``: Relative DNS name for this NIC used for internal communications between VMs in
          the same virtual network.
        - ``internal_fqdn``: Fully qualified DNS name supporting internal communications between VMs in the same virtual
          network.
        - ``internal_domain_name_suffix``: Even if internal_dns_name_label is not specified, a DNS entry is created for
          the primary NIC of the VM. This DNS name can be constructed by concatenating the VM name with the value of
          internal_domain_name_suffix.
    :param mac_address:
        Optional string containing the MAC address of the network interface.
    :param primary:
        Optional boolean allowing the interface to be set as the primary network interface on a virtual machine
        with multiple interfaces attached.
    :param enable_accelerated_networking:
        Optional boolean indicating whether accelerated networking should be enabled for the interface.
    :param enable_ip_forwarding:
        Optional boolean indicating whether IP forwarding should be enabled for the interface.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    Example usage:
    .. code-block:: yaml
        Ensure network interface exists:
            azurearm_network.network_interface_present:
                - name: iface1
                - subnet: vnet1_sn1
                - virtual_network: vnet1
                - resource_group: group1
                - ip_configurations:
                  - name: iface1_ipc1
                    public_ip_address: pub_ip2
                - dns_settings:
                    internal_dns_name_label: decisionlab-int-test-label
                - primary: True
                - enable_accelerated_networking: True
                - enable_ip_forwarding: False
                - network_security_group: nsg1
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure subnet exists
                  - azurearm_network: Ensure network security group exists
                  - azurearm_network: Ensure another public IP exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    iface = __salt__['azurearm_network.network_interface_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # Existing interface found: build a per-field diff into ret['changes'].
    if 'error' not in iface:
        # tag changes
        tag_changes = __utils__['dictdiffer.deep_diff'](iface.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        # mac_address changes
        if mac_address and (mac_address != iface.get('mac_address')):
            ret['changes']['mac_address'] = {
                'old': iface.get('mac_address'),
                'new': mac_address
            }

        # primary changes
        # 'is not None' (rather than truthiness) lets an explicit False register a change.
        if primary is not None:
            # Existing value defaults to True so an unset attribute is treated as primary.
            if primary != iface.get('primary', True):
                ret['changes']['primary'] = {
                    'old': iface.get('primary'),
                    'new': primary
                }

        # enable_accelerated_networking changes
        if enable_accelerated_networking is not None:
            if enable_accelerated_networking != iface.get('enable_accelerated_networking'):
                ret['changes']['enable_accelerated_networking'] = {
                    'old': iface.get('enable_accelerated_networking'),
                    'new': enable_accelerated_networking
                }

        # enable_ip_forwarding changes
        if enable_ip_forwarding is not None:
            if enable_ip_forwarding != iface.get('enable_ip_forwarding'):
                ret['changes']['enable_ip_forwarding'] = {
                    'old': iface.get('enable_ip_forwarding'),
                    'new': enable_ip_forwarding
                }

        # network_security_group changes
        # Only the resource name (last segment of the ARM resource ID) is compared.
        nsg_name = None
        if iface.get('network_security_group'):
            nsg_name = iface['network_security_group']['id'].split('/')[-1]

        if network_security_group and (network_security_group != nsg_name):
            ret['changes']['network_security_group'] = {
                'old': nsg_name,
                'new': network_security_group
            }

        # virtual_machine changes
        vm_name = None
        if iface.get('virtual_machine'):
            vm_name = iface['virtual_machine']['id'].split('/')[-1]

        if virtual_machine and (virtual_machine != vm_name):
            ret['changes']['virtual_machine'] = {
                'old': vm_name,
                'new': virtual_machine
            }

        # dns_settings changes
        if dns_settings:
            if not isinstance(dns_settings, dict):
                ret['comment'] = 'DNS settings must be provided as a dictionary!'
                return ret

            for key in dns_settings:
                # NOTE(review): .lower() assumes string values; a list-valued setting
                # (e.g. dns_servers, per the docstring) would raise AttributeError here —
                # confirm callers only pass string settings through this state.
                if dns_settings[key].lower() != iface.get('dns_settings', {}).get(key, '').lower():
                    ret['changes']['dns_settings'] = {
                        'old': iface.get('dns_settings'),
                        'new': dns_settings
                    }
                    break

        # ip_configurations changes
        comp_ret = __utils__['azurearm.compare_list_of_dicts'](
            iface.get('ip_configurations', []),
            ip_configurations,
            ['public_ip_address', 'subnet']
        )

        if comp_ret.get('comment'):
            ret['comment'] = '"ip_configurations" {0}'.format(comp_ret['comment'])
            return ret

        if comp_ret.get('changes'):
            ret['changes']['ip_configurations'] = comp_ret['changes']

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Network interface {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Network interface {0} would be updated.'.format(name)
            return ret

    else:
        # No existing interface: everything below is a new-resource diff.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'ip_configurations': ip_configurations,
                'dns_settings': dns_settings,
                'network_security_group': network_security_group,
                'virtual_machine': virtual_machine,
                'enable_accelerated_networking': enable_accelerated_networking,
                'enable_ip_forwarding': enable_ip_forwarding,
                'mac_address': mac_address,
                'primary': primary,
                'tags': tags,
            }
        }

        if __opts__['test']:
            ret['comment'] = 'Network interface {0} would be created.'.format(name)
            ret['result'] = None
            return ret

    iface_kwargs = kwargs.copy()
    iface_kwargs.update(connection_auth)

    iface = __salt__['azurearm_network.network_interface_create_or_update'](
        name=name,
        subnet=subnet,
        virtual_network=virtual_network,
        resource_group=resource_group,
        ip_configurations=ip_configurations,
        dns_settings=dns_settings,
        enable_accelerated_networking=enable_accelerated_networking,
        enable_ip_forwarding=enable_ip_forwarding,
        mac_address=mac_address,
        primary=primary,
        network_security_group=network_security_group,
        virtual_machine=virtual_machine,
        tags=tags,
        **iface_kwargs
    )

    if 'error' not in iface:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create network interface {0}! ({1})'.format(name, iface.get('error'))
    return ret
def network_interface_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0
    Ensure a network interface does not exist in the resource group.
    :param name:
        Name of the network interface.
    :param resource_group:
        The resource group assigned to the network interface.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    iface = __salt__['azurearm_network.network_interface_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the interface is already absent.
    if 'error' in iface:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} was not found.'.format(name)
        return ret

    elif __opts__['test']:
        ret['comment'] = 'Network interface {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {
            'old': iface,
            'new': {},
        }
        return ret

    deleted = __salt__['azurearm_network.network_interface_delete'](name, resource_group, **connection_auth)

    if deleted:
        ret['result'] = True
        ret['comment'] = 'Network interface {0} has been deleted.'.format(name)
        ret['changes'] = {
            'old': iface,
            'new': {}
        }
        return ret

    # Fix: the failure message previously contained a stray ')' character,
    # inconsistent with every other *_absent state in this module.
    ret['comment'] = 'Failed to delete network interface {0}!'.format(name)
    return ret
def route_table_present(name, resource_group, tags=None, routes=None, disable_bgp_route_propagation=None,
                        connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0
    Ensure a route table exists.
    :param name:
        Name of the route table.
    :param resource_group:
        The resource group assigned to the route table.
    :param routes:
        An optional list of dictionaries representing valid Route objects contained within a route table. See the
        documentation for the route_present state or route_create_or_update execution module for more information on
        required and optional parameters for routes. The routes are only managed if this parameter is present. When this
        parameter is absent, implemented routes will not be removed, and will merely become unmanaged.
    :param disable_bgp_route_propagation:
        An optional boolean parameter setting whether to disable the routes learned by BGP on the route table.
    :param tags:
        A dictionary of strings can be passed as tag metadata to the route table object.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    Example usage:
    .. code-block:: yaml
        Ensure route table exists:
            azurearm_network.route_table_present:
                - name: rt1
                - resource_group: group1
                - routes:
                  - name: rt1_route1
                    address_prefix: '0.0.0.0/0'
                    next_hop_type: internet
                  - name: rt1_route2
                    address_prefix: '192.168.0.0/16'
                    next_hop_type: vnetlocal
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    rt_tbl = __salt__['azurearm_network.route_table_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # Existing route table found: build a per-field diff into ret['changes'].
    if 'error' not in rt_tbl:
        # tag changes
        tag_changes = __utils__['dictdiffer.deep_diff'](rt_tbl.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        # disable_bgp_route_propagation changes
        # Fix: compare against None explicitly (instead of truthiness) so that an
        # explicit ``False`` is still detected as a change, matching the boolean
        # handling in network_interface_present.
        # pylint: disable=line-too-long
        if disable_bgp_route_propagation is not None and (disable_bgp_route_propagation != rt_tbl.get('disable_bgp_route_propagation')):
            ret['changes']['disable_bgp_route_propagation'] = {
                'old': rt_tbl.get('disable_bgp_route_propagation'),
                'new': disable_bgp_route_propagation
            }

        # routes changes
        if routes:
            comp_ret = __utils__['azurearm.compare_list_of_dicts'](rt_tbl.get('routes', []), routes)

            if comp_ret.get('comment'):
                ret['comment'] = '"routes" {0}'.format(comp_ret['comment'])
                return ret

            if comp_ret.get('changes'):
                ret['changes']['routes'] = comp_ret['changes']

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Route table {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Route table {0} would be updated.'.format(name)
            return ret

    else:
        # No existing route table: everything below is a new-resource diff.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'tags': tags,
                'routes': routes,
                'disable_bgp_route_propagation': disable_bgp_route_propagation,
            }
        }

        if __opts__['test']:
            ret['comment'] = 'Route table {0} would be created.'.format(name)
            ret['result'] = None
            return ret

    rt_tbl_kwargs = kwargs.copy()
    rt_tbl_kwargs.update(connection_auth)

    rt_tbl = __salt__['azurearm_network.route_table_create_or_update'](
        name=name,
        resource_group=resource_group,
        disable_bgp_route_propagation=disable_bgp_route_propagation,
        routes=routes,
        tags=tags,
        **rt_tbl_kwargs
    )

    if 'error' not in rt_tbl:
        ret['result'] = True
        ret['comment'] = 'Route table {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create route table {0}! ({1})'.format(name, rt_tbl.get('error'))
    return ret
def route_table_absent(name, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0
    Ensure a route table does not exist in the resource group.
    :param name:
        Name of the route table.
    :param resource_group:
        The resource group assigned to the route table.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    rt_tbl = __salt__['azurearm_network.route_table_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the route table is already absent.
    if 'error' in rt_tbl:
        ret['result'] = True
        ret['comment'] = 'Route table {0} was not found.'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Route table {0} would be deleted.'.format(name)
        ret['changes'] = {'old': rt_tbl, 'new': {}}
        return ret

    deleted = __salt__['azurearm_network.route_table_delete'](name, resource_group, **connection_auth)

    if deleted:
        ret['result'] = True
        ret['comment'] = 'Route table {0} has been deleted.'.format(name)
        ret['changes'] = {'old': rt_tbl, 'new': {}}
        return ret

    ret['comment'] = 'Failed to delete route table {0}!'.format(name)
    return ret
def route_present(name, address_prefix, next_hop_type, route_table, resource_group, next_hop_ip_address=None,
                  connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0
    Ensure a route exists within a route table.
    :param name:
        Name of the route.
    :param address_prefix:
        The destination CIDR to which the route applies.
    :param next_hop_type:
        The type of Azure hop the packet should be sent to. Possible values are: 'VirtualNetworkGateway', 'VnetLocal',
        'Internet', 'VirtualAppliance', and 'None'.
    :param next_hop_ip_address:
        The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop
        type is 'VirtualAppliance'.
    :param route_table:
        The name of the existing route table which will contain the route.
    :param resource_group:
        The resource group assigned to the route table.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    Example usage:
    .. code-block:: yaml
        Ensure route exists:
            azurearm_network.route_present:
                - name: rt1_route2
                - route_table: rt1
                - resource_group: group1
                - address_prefix: '192.168.0.0/16'
                - next_hop_type: vnetlocal
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure route table exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    route = __salt__['azurearm_network.route_get'](
        name,
        route_table,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # Existing route found: build a per-field diff into ret['changes'].
    if 'error' not in route:
        if address_prefix != route.get('address_prefix'):
            ret['changes']['address_prefix'] = {
                'old': route.get('address_prefix'),
                'new': address_prefix
            }

        # Case-insensitive compare: users may pass e.g. 'vnetlocal' for 'VnetLocal'.
        if next_hop_type.lower() != route.get('next_hop_type', '').lower():
            ret['changes']['next_hop_type'] = {
                'old': route.get('next_hop_type'),
                'new': next_hop_type
            }

        # next_hop_ip_address is only meaningful for VirtualAppliance routes,
        # so it is only diffed for that hop type.
        if next_hop_type.lower() == 'virtualappliance' and next_hop_ip_address != route.get('next_hop_ip_address'):
            ret['changes']['next_hop_ip_address'] = {
                'old': route.get('next_hop_ip_address'),
                'new': next_hop_ip_address
            }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Route {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Route {0} would be updated.'.format(name)
            return ret

    else:
        # No existing route: everything below is a new-resource diff.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'address_prefix': address_prefix,
                'next_hop_type': next_hop_type,
                'next_hop_ip_address': next_hop_ip_address
            }
        }

        if __opts__['test']:
            ret['comment'] = 'Route {0} would be created.'.format(name)
            ret['result'] = None
            return ret

    route_kwargs = kwargs.copy()
    route_kwargs.update(connection_auth)

    route = __salt__['azurearm_network.route_create_or_update'](
        name=name,
        route_table=route_table,
        resource_group=resource_group,
        address_prefix=address_prefix,
        next_hop_type=next_hop_type,
        next_hop_ip_address=next_hop_ip_address,
        **route_kwargs
    )

    if 'error' not in route:
        ret['result'] = True
        ret['comment'] = 'Route {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create route {0}! ({1})'.format(name, route.get('error'))
    return ret
def route_absent(name, route_table, resource_group, connection_auth=None):
    '''
    .. versionadded:: 2019.2.0
    Ensure a route does not exist in the route table.
    :param name:
        Name of the route.
    :param route_table:
        The name of the existing route table containing the route.
    :param resource_group:
        The resource group assigned to the route table.
    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    route = __salt__['azurearm_network.route_get'](
        name,
        route_table,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # A lookup error means the route is already absent.
    if 'error' in route:
        ret['result'] = True
        ret['comment'] = 'Route {0} was not found.'.format(name)
        return ret

    elif __opts__['test']:
        ret['comment'] = 'Route {0} would be deleted.'.format(name)
        ret['result'] = None
        ret['changes'] = {
            'old': route,
            'new': {},
        }
        return ret

    deleted = __salt__['azurearm_network.route_delete'](name, route_table, resource_group, **connection_auth)

    if deleted:
        ret['result'] = True
        ret['comment'] = 'Route {0} has been deleted.'.format(name)
        ret['changes'] = {
            'old': route,
            'new': {}
        }
        return ret

    ret['comment'] = 'Failed to delete route {0}!'.format(name)
    return ret
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.